New Defects reported by Coverity Scan for ceph (fwd)

2014-10-08 Thread Sage Weil
--- Begin Message ---


Hi,

Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

6 new defect(s) introduced to ceph found with Coverity Scan.
3 defect(s), reported by Coverity Scan earlier, were marked fixed in the recent 
build analyzed by Coverity Scan.

New defect(s) Reported-by: Coverity Scan
Showing 6 of 6 defect(s)


** CID 1244195:  Dereference after null check  (FORWARD_NULL)
/mon/Monitor.cc: 3110 in Monitor::_ms_dispatch(Message *)()

** CID 1244196:  Data race condition  (MISSING_LOCK)
/test/mon/test-mon-msg.cc: 229 in MonMsgTest::SetUp()()

** CID 1244197:  Dereference before null check  (REVERSE_INULL)
/mon/Monitor.cc: 3366 in Monitor::dispatch(MonSession *, Message *, bool)()

** CID 1244198:  Uncaught exception  (UNCAUGHT_EXCEPT)
/test/mon/test-mon-msg.cc: 322 in main()
/test/mon/test-mon-msg.cc: 322 in main()
/test/mon/test-mon-msg.cc: 322 in main()

** CID 1244200:  Uninitialized pointer field  (UNINIT_CTOR)
/test/mon/test-mon-msg.cc: 67 in MonClientHelper::MonClientHelper(CephContext *)()

** CID 1244199:  Uninitialized pointer field  (UNINIT_CTOR)
/test/mon/test-mon-msg.cc: 225 in MonMsgTest::MonMsgTest()()



*** CID 1244195:  Dereference after null check  (FORWARD_NULL)
/mon/Monitor.cc: 3110 in Monitor::_ms_dispatch(Message *)()
3104   if (s && s->closed) {
3105 caps = s->caps;
3106 reuse_caps = true;
3107 s->put();
3108 s = NULL;
3109   }
>>> CID 1244195:  Dereference after null check  (FORWARD_NULL)
>>> Comparing "s" to null implies that "s" might be null.
3110   if (!s) {
3111 // if the sender is not a monitor, make sure their first message for a
3112 // session is an MAuth.  If it is not, assume it's a stray message,
3113 // and considering that we are creating a new session it is safe to
3114 // assume that the sender hasn't authenticated yet, so we have no way
3115 // of assessing whether we should handle it or not.
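
In isolation, the pattern this checker complains about looks like the sketch
below (generic C++, not the Monitor code; the Session type and the early
return are assumptions made for the example):

  #include <cstddef>

  struct Session { int caps; };

  int dispatch_sketch(Session *s) {
    if (s && s->caps == 0) {  // comparing s to null tells Coverity s may be null
      s = NULL;
    }
    if (!s) {
      // an unconditional s->caps on this path is what gets flagged;
      // an early return (or assigning a fresh session) silences it
      return -1;
    }
    return s->caps;
  }

  int main() {
    return dispatch_sketch(NULL) == -1 ? 0 : 1;
  }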


*** CID 1244196:  Data race condition  (MISSING_LOCK)
/test/mon/test-mon-msg.cc: 229 in MonMsgTest::SetUp()()
223   MonMsgTest() :
224 MonClientHelper(g_ceph_context),
225 lock("lock") { }
226 
227 public:
228   virtual void SetUp() {
>>> CID 1244196:  Data race condition  (MISSING_LOCK)
>>> Accessing "this->reply_type" without holding lock "Mutex._m". 
>>> Elsewhere, "MonMsgTest.reply_type" is accessed with "Mutex._m" held 1 out 
>>> of 2 times (1 of these accesses strongly imply that it is necessary).
229 reply_type = -1;
230 if (reply_msg) {
231   reply_msg->put();
232   reply_msg = NULL;
233 }
234 ASSERT_EQ(init(), 0);
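
A minimal sketch of what the checker wants here: take the same lock that
protects reply_type elsewhere before writing it in SetUp().  std::mutex is
used only to keep the sketch self-contained; the test itself uses the tree's
own Mutex type, so treat this as illustrative:

  #include <mutex>

  struct MonMsgTestSketch {
    std::mutex lock;   // stands in for the Mutex("lock") member
    int reply_type;

    void SetUp() {
      std::lock_guard<std::mutex> l(lock);  // hold the lock for the write
      reply_type = -1;
    }
  };

  int main() {
    MonMsgTestSketch t;
    t.SetUp();
    return 0;
  }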


*** CID 1244197:  Dereference before null check  (REVERSE_INULL)
/mon/Monitor.cc: 3366 in Monitor::dispatch(MonSession *, Message *, bool)()
3360   }
3361   break;
3362 
3363 // elector messages
3364 case MSG_MON_ELECTION:
3365   //check privileges here for simplicity
>>> CID 1244197:  Dereference before null check  (REVERSE_INULL)
>>> Null-checking "s" suggests that it may be null, but it has already been 
>>> dereferenced on all paths leading to the check.
3366   if (s &&
3367   !s->is_capable("mon", MON_CAP_X)) {
3368 dout(0) << "MMonElection received from entity without enough caps!"
3369   << s->caps << dendl;
3370 m->put();
3371 break;


*** CID 1244198:  Uncaught exception  (UNCAUGHT_EXCEPT)
/test/mon/test-mon-msg.cc: 322 in main()
316 
317   int r = monc.get_monmap();
318   ASSERT_EQ(r, 0);
319   ASSERT_FALSE(monc.monmap.contains("client"));
320 }
321 
>>> CID 1244198:  Uncaught exception  (UNCAUGHT_EXCEPT)
>>> In function "main(int, char **)" an exception of type 
>>> "ceph::FailedAssertion" is thrown and never caught.
322 int main(int argc, char *argv[])
323 {
324   vector def_args;
325   vector args;
326   argv_to_vec(argc, (const char **)argv, args);
327 
/test/mon/test-mon-msg.cc: 322 in main()
316 
317   int r = monc.get_monmap();
318   ASSERT_EQ(r, 0);
319   ASSERT_FALSE(monc.monmap.contains("client"));
320 }
321 
>>> CID 1244198:  Uncaught exception  (UNCAUGHT_EXCEPT)
>>> In function "main(int, char **)" an exception of type 
>>> "ceph::FailedAssertion" is thrown and never caught.
322 int main(int argc, char *argv[])
323 {
324   vector def_args;
325   vector args;
326   argv_to

Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-09-30 Thread Ric Wheeler

On 09/30/2014 01:38 PM, Sage Weil wrote:

On Tue, 30 Sep 2014, Gregory Farnum wrote:

On Tue, Sep 30, 2014 at 6:59 AM, Sage Weil  wrote:

Looks like recent changes from Greg, Loic, and I.

-- Forwarded message --
From: scan-ad...@coverity.com
To: undisclosed-recipients:;
Cc:
Date: Tue, 30 Sep 2014 06:21:08 -0700
Subject: New Defects reported by Coverity Scan for ceph


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 4 of 4 defect(s)


** CID 1242019:  Data race condition  (MISSING_LOCK)
/msg/Pipe.cc: 230 in Pipe::DelayedDelivery::entry()()

** CID 1242021:  Resource leak  (RESOURCE_LEAK)
/test/librados/tier.cc: 1026 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
/test/librados/tier.cc: 1022 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
/test/librados/tier.cc: 1040 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
/test/librados/tier.cc: 1037 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()

** CID 1242020:  Resource leak  (RESOURCE_LEAK)
/test/librados/aio.cc: 168 in LibRadosAio_TooBig_Test::TestBody()()

** CID 1242018:  Resource leak  (RESOURCE_LEAK)
/test/librados/aio.cc: 188 in LibRadosAio_TooBigPP_Test::TestBody()()
/test/librados/aio.cc: 190 in LibRadosAio_TooBigPP_Test::TestBody()()
/test/librados/aio.cc: 187 in LibRadosAio_TooBigPP_Test::TestBody()()



*** CID 1242019:  Data race condition  (MISSING_LOCK)
/msg/Pipe.cc: 230 in Pipe::DelayedDelivery::entry()()
224 if (flush_count > 0) {
225   --flush_count;
226   active_flush = true;
227 }
228 if (pipe->in_q->can_fast_dispatch(m)) {
229   if (!stop_fast_dispatching_flag) {

 CID 1242019:  Data race condition  (MISSING_LOCK)
 Accessing "this->delay_dispatching" without holding lock "Mutex._m". Elsewhere, 
"_ZN4Pipe15DelayedDeliveryE.delay_dispatching" is accessed with "Mutex._m" held 1 out of 2 times (1 of 
these accesses strongly imply that it is necessary).

230 delay_dispatching = true;
231 delay_lock.Unlock();
232 pipe->in_q->fast_dispatch(m);
233 delay_lock.Lock();
234 delay_dispatching = false;
235 if (stop_fast_dispatching_flag) {

This one's a false positive. (delay_dispatching is protected by the
delay_lock, but I think it's picking up on the Pipe::lock which is
held when DelayedDelivery is constructed and initialized.) Is there a
way I should annotate this, or is it something we need to adjust in
the Coverity web interface?

There are annotations but I don't know how they work.  I've been marking
them through the web interface...

sage



Jeff and Kaleb (last I remember) had more expertise in coverity magic - they 
might know how to annotate those false positives...
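
For reference, the in-source annotations are (if memory serves) comments
naming the event, placed on the line immediately before the flagged
statement.  The exact syntax below is an assumption from memory, so check
the Coverity documentation before relying on it:

  // Sketch only; the "// coverity[...]" annotation syntax is from memory.
  void delayed_delivery_sketch(bool &delay_dispatching) {
    // The member really is protected by delay_lock, which the checker
    // fails to associate with this write, hence the suppression below.
    // coverity[missing_lock]
    delay_dispatching = true;
  }

  int main() {
    bool d = false;
    delayed_delivery_sketch(d);
    return d ? 0 : 1;
  }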


ric

--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-09-30 Thread Sage Weil
On Tue, 30 Sep 2014, Gregory Farnum wrote:
> On Tue, Sep 30, 2014 at 6:59 AM, Sage Weil  wrote:
> > Looks like recent changes from Greg, Loic, and I.
> >
> > -- Forwarded message --
> > From: scan-ad...@coverity.com
> > To: undisclosed-recipients:;
> > Cc:
> > Date: Tue, 30 Sep 2014 06:21:08 -0700
> > Subject: New Defects reported by Coverity Scan for ceph
> >
> >
> > Hi,
> >
> >
> > Please find the latest report on new defect(s) introduced to ceph found 
> > with Coverity Scan.
> >
> > Defect(s) Reported-by: Coverity Scan
> > Showing 4 of 4 defect(s)
> >
> >
> > ** CID 1242019:  Data race condition  (MISSING_LOCK)
> > /msg/Pipe.cc: 230 in Pipe::DelayedDelivery::entry()()
> >
> > ** CID 1242021:  Resource leak  (RESOURCE_LEAK)
> > /test/librados/tier.cc: 1026 in 
> > LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
> > /test/librados/tier.cc: 1022 in 
> > LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
> > /test/librados/tier.cc: 1040 in 
> > LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
> > /test/librados/tier.cc: 1037 in 
> > LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
> >
> > ** CID 1242020:  Resource leak  (RESOURCE_LEAK)
> > /test/librados/aio.cc: 168 in LibRadosAio_TooBig_Test::TestBody()()
> >
> > ** CID 1242018:  Resource leak  (RESOURCE_LEAK)
> > /test/librados/aio.cc: 188 in LibRadosAio_TooBigPP_Test::TestBody()()
> > /test/librados/aio.cc: 190 in LibRadosAio_TooBigPP_Test::TestBody()()
> > /test/librados/aio.cc: 187 in LibRadosAio_TooBigPP_Test::TestBody()()
> >
> >
> > 
> > *** CID 1242019:  Data race condition  (MISSING_LOCK)
> > /msg/Pipe.cc: 230 in Pipe::DelayedDelivery::entry()()
> > 224 if (flush_count > 0) {
> > 225   --flush_count;
> > 226   active_flush = true;
> > 227 }
> > 228 if (pipe->in_q->can_fast_dispatch(m)) {
> > 229   if (!stop_fast_dispatching_flag) {
>  CID 1242019:  Data race condition  (MISSING_LOCK)
>  Accessing "this->delay_dispatching" without holding lock "Mutex._m". 
>  Elsewhere, "_ZN4Pipe15DelayedDeliveryE.delay_dispatching" is accessed 
>  with "Mutex._m" held 1 out of 2 times (1 of these accesses strongly 
>  imply that it is necessary).
> > 230 delay_dispatching = true;
> > 231 delay_lock.Unlock();
> > 232 pipe->in_q->fast_dispatch(m);
> > 233 delay_lock.Lock();
> > 234 delay_dispatching = false;
> > 235 if (stop_fast_dispatching_flag) {
> 
> This one's a false positive. (delay_dispatching is protected by the
> delay_lock, but I think it's picking up on the Pipe::lock which is
> held when DelayedDelivery is constructed and initialized.) Is there a
> way I should annotate this, or is it something we need to adjust in
> the Coverity web interface?

There are annotations but I don't know how they work.  I've been marking 
them through the web interface...

sage
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-09-30 Thread Gregory Farnum
On Tue, Sep 30, 2014 at 6:59 AM, Sage Weil  wrote:
> Looks like recent changes from Greg, Loic, and I.
>
> -- Forwarded message --
> From: scan-ad...@coverity.com
> To: undisclosed-recipients:;
> Cc:
> Date: Tue, 30 Sep 2014 06:21:08 -0700
> Subject: New Defects reported by Coverity Scan for ceph
>
>
> Hi,
>
>
> Please find the latest report on new defect(s) introduced to ceph found with 
> Coverity Scan.
>
> Defect(s) Reported-by: Coverity Scan
> Showing 4 of 4 defect(s)
>
>
> ** CID 1242019:  Data race condition  (MISSING_LOCK)
> /msg/Pipe.cc: 230 in Pipe::DelayedDelivery::entry()()
>
> ** CID 1242021:  Resource leak  (RESOURCE_LEAK)
> /test/librados/tier.cc: 1026 in 
> LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
> /test/librados/tier.cc: 1022 in 
> LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
> /test/librados/tier.cc: 1040 in 
> LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
> /test/librados/tier.cc: 1037 in 
> LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
>
> ** CID 1242020:  Resource leak  (RESOURCE_LEAK)
> /test/librados/aio.cc: 168 in LibRadosAio_TooBig_Test::TestBody()()
>
> ** CID 1242018:  Resource leak  (RESOURCE_LEAK)
> /test/librados/aio.cc: 188 in LibRadosAio_TooBigPP_Test::TestBody()()
> /test/librados/aio.cc: 190 in LibRadosAio_TooBigPP_Test::TestBody()()
> /test/librados/aio.cc: 187 in LibRadosAio_TooBigPP_Test::TestBody()()
>
>
> 
> *** CID 1242019:  Data race condition  (MISSING_LOCK)
> /msg/Pipe.cc: 230 in Pipe::DelayedDelivery::entry()()
> 224 if (flush_count > 0) {
> 225   --flush_count;
> 226   active_flush = true;
> 227 }
> 228 if (pipe->in_q->can_fast_dispatch(m)) {
> 229   if (!stop_fast_dispatching_flag) {
 CID 1242019:  Data race condition  (MISSING_LOCK)
 Accessing "this->delay_dispatching" without holding lock "Mutex._m". 
 Elsewhere, "_ZN4Pipe15DelayedDeliveryE.delay_dispatching" is accessed with 
 "Mutex._m" held 1 out of 2 times (1 of these accesses strongly imply that 
 it is necessary).
> 230 delay_dispatching = true;
> 231 delay_lock.Unlock();
> 232 pipe->in_q->fast_dispatch(m);
> 233 delay_lock.Lock();
> 234 delay_dispatching = false;
> 235 if (stop_fast_dispatching_flag) {

This one's a false positive. (delay_dispatching is protected by the
delay_lock, but I think it's picking up on the Pipe::lock which is
held when DelayedDelivery is constructed and initialized.) Is there a
way I should annotate this, or is it something we need to adjust in
the Coverity web interface?
-Greg
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-09-30 Thread Loic Dachary
I'll fix the aio.cc problems, thanks !

On 30/09/2014 15:59, Sage Weil wrote:
> Looks like recent changes from Greg, Loic, and I.
> 

-- 
Loïc Dachary, Artisan Logiciel Libre





New Defects reported by Coverity Scan for ceph (fwd)

2014-09-30 Thread Sage Weil
Looks like recent changes from Greg, Loic, and I.
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 4 of 4 defect(s)


** CID 1242019:  Data race condition  (MISSING_LOCK)
/msg/Pipe.cc: 230 in Pipe::DelayedDelivery::entry()()

** CID 1242021:  Resource leak  (RESOURCE_LEAK)
/test/librados/tier.cc: 1026 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
/test/librados/tier.cc: 1022 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
/test/librados/tier.cc: 1040 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
/test/librados/tier.cc: 1037 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()

** CID 1242020:  Resource leak  (RESOURCE_LEAK)
/test/librados/aio.cc: 168 in LibRadosAio_TooBig_Test::TestBody()()

** CID 1242018:  Resource leak  (RESOURCE_LEAK)
/test/librados/aio.cc: 188 in LibRadosAio_TooBigPP_Test::TestBody()()
/test/librados/aio.cc: 190 in LibRadosAio_TooBigPP_Test::TestBody()()
/test/librados/aio.cc: 187 in LibRadosAio_TooBigPP_Test::TestBody()()



*** CID 1242019:  Data race condition  (MISSING_LOCK)
/msg/Pipe.cc: 230 in Pipe::DelayedDelivery::entry()()
224 if (flush_count > 0) {
225   --flush_count;
226   active_flush = true;
227 }
228 if (pipe->in_q->can_fast_dispatch(m)) {
229   if (!stop_fast_dispatching_flag) {
>>> CID 1242019:  Data race condition  (MISSING_LOCK)
>>> Accessing "this->delay_dispatching" without holding lock "Mutex._m". 
>>> Elsewhere, "_ZN4Pipe15DelayedDeliveryE.delay_dispatching" is accessed with 
>>> "Mutex._m" held 1 out of 2 times (1 of these accesses strongly imply that 
>>> it is necessary).
230 delay_dispatching = true;
231 delay_lock.Unlock();
232 pipe->in_q->fast_dispatch(m);
233 delay_lock.Lock();
234 delay_dispatching = false;
235 if (stop_fast_dispatching_flag) {


*** CID 1242021:  Resource leak  (RESOURCE_LEAK)
/test/librados/tier.cc: 1026 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
1020 op.cache_evict();
1021 librados::AioCompletion *completion = cluster.aio_create_completion();
1022 ASSERT_EQ(0, cache_ioctx.aio_operate(
1023   "foo", completion, &op,
1024   librados::OPERATION_IGNORE_CACHE, NULL));
1025 completion->wait_for_safe();
>>> CID 1242021:  Resource leak  (RESOURCE_LEAK)
>>> Variable "completion" going out of scope leaks the storage it points to.
1026 ASSERT_EQ(0, completion->get_return_value());
1027 completion->release();
1028   }
1029 
1030   // verify the snapdir is not present in the cache pool
1031   {
/test/librados/tier.cc: 1022 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
1016 
1017   // evict
1018   {
1019 ObjectReadOperation op;
1020 op.cache_evict();
1021 librados::AioCompletion *completion = cluster.aio_create_completion();
>>> CID 1242021:  Resource leak  (RESOURCE_LEAK)
>>> Variable "completion" going out of scope leaks the storage it points to.
1022 ASSERT_EQ(0, cache_ioctx.aio_operate(
1023   "foo", completion, &op,
1024   librados::OPERATION_IGNORE_CACHE, NULL));
1025 completion->wait_for_safe();
1026 ASSERT_EQ(0, completion->get_return_value());
1027 completion->release();
/test/librados/tier.cc: 1040 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
1034 op.list_snaps(&snapset, NULL);
1035 ioctx.snap_set_read(librados::SNAP_DIR);
1036 librados::AioCompletion *completion = cluster.aio_create_completion();
1037 ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op,
1038   librados::OPERATION_IGNORE_CACHE, NULL));
1039 completion->wait_for_safe();
>>> CID 1242021:  Resource leak  (RESOURCE_LEAK)
>>> Variable "completion" going out of scope leaks the storage it points to.
1040 ASSERT_EQ(-ENOENT, completion->get_return_value());
1041 completion->release();
1042   }
1043 }
1044 
1045 TEST_F(LibRadosTwoPoolsPP, TryFlush) {
/test/librados/tier.cc: 1037 in LibRadosTwoPoolsPP_EvictSnap2_Test::TestBody()()
1031   {
1032 ObjectReadOperation op;
1033 librados::snap_set_t snapset;
1034 op.list_snaps(&snapset, NULL);
1035 ioctx.snap_set_read(librados::SNAP_DIR);
1036 librados::AioCompletion *completion = cluster.aio_create_completion();
>>> CID 1242021:  Resource leak  (RESOURCE_LEAK)
>>> Variable "completion" going out of scope leaks the storage it points to.
1037 ASSERT_EQ(0, ioctx.aio_operate("foo", completion, &op,
1038
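
All of these reports share the same shape: if a gtest ASSERT fails between
aio_create_completion() and release(), the completion is never released.
One way to make that robust is a small RAII holder; the wrapper below is
only a sketch (C++11 unique_ptr for brevity), nothing like it is implied to
exist in the tree:

  #include <memory>
  #include <rados/librados.hpp>

  // Hypothetical helper: release() the completion when the holder goes out
  // of scope, so an early ASSERT return cannot leak it.
  struct AioCompletionReleaser {
    void operator()(librados::AioCompletion *c) const {
      if (c)
        c->release();
    }
  };
  typedef std::unique_ptr<librados::AioCompletion, AioCompletionReleaser>
      ScopedAioCompletion;

  // Usage inside a test body (cluster, cache_ioctx and op as in the test):
  //   ScopedAioCompletion completion(cluster.aio_create_completion());
  //   ASSERT_EQ(0, cache_ioctx.aio_operate("foo", completion.get(), &op,
  //                librados::OPERATION_IGNORE_CACHE, NULL));
  //   completion->wait_for_safe();
  //   ASSERT_EQ(0, completion->get_return_value());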

Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-09-25 Thread John Spray
Nice to see that coverity and lockdep agree :-)

This should go away with the fix for #9562.

John

On Thu, Sep 25, 2014 at 4:02 PM, Sage Weil  wrote:
>
>
> -- Forwarded message --
> From: scan-ad...@coverity.com
> To: undisclosed-recipients:;
> Cc:
> Date: Thu, 25 Sep 2014 06:18:46 -0700
> Subject: New Defects reported by Coverity Scan for ceph
>
>
> Hi,
>
>
> Please find the latest report on new defect(s) introduced to ceph found with 
> Coverity Scan.
>
> Defect(s) Reported-by: Coverity Scan
> Showing 1 of 1 defect(s)
>
>
> ** CID 1241497:  Thread deadlock  (ORDER_REVERSAL)
>
>
>
> 
> *** CID 1241497:  Thread deadlock  (ORDER_REVERSAL)
> /osdc/Filer.cc: 314 in Filer::_do_purge_range(PurgeRange *, int)()
> 308 return;
> 309   }
> 310
> 311   int max = 10 - pr->uncommitted;
> 312   while (pr->num > 0 && max > 0) {
> 313 object_t oid = file_object_t(pr->ino, pr->first);
 CID 1241497:  Thread deadlock  (ORDER_REVERSAL)
 Calling "get_osdmap_read" acquires lock "RWLock.L" while holding lock 
 "Mutex._m" (count: 15 / 30).
> 314 const OSDMap *osdmap = objecter->get_osdmap_read();
> > 315 object_locator_t oloc = osdmap->file_to_object_locator(pr->layout);
> 316 objecter->put_osdmap_read();
> 317 objecter->remove(oid, oloc, pr->snapc, pr->mtime, pr->flags,
> 318  NULL, new C_PurgeRange(this, pr));
> 319 pr->uncommitted++;
>
>
> 
> To view the defects in Coverity Scan visit, 
> http://scan.coverity.com/projects/25?tab=overview
>
> To unsubscribe from the email notification for new defects, 
> http://scan5.coverity.com/cgi-bin/unsubscribe.py
>
>
>
>
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


New Defects reported by Coverity Scan for ceph (fwd)

2014-09-25 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 1 of 1 defect(s)


** CID 1241497:  Thread deadlock  (ORDER_REVERSAL)




*** CID 1241497:  Thread deadlock  (ORDER_REVERSAL)
/osdc/Filer.cc: 314 in Filer::_do_purge_range(PurgeRange *, int)()
308 return;
309   }
310 
311   int max = 10 - pr->uncommitted;
312   while (pr->num > 0 && max > 0) {
313 object_t oid = file_object_t(pr->ino, pr->first);
>>> CID 1241497:  Thread deadlock  (ORDER_REVERSAL)
>>> Calling "get_osdmap_read" acquires lock "RWLock.L" while holding lock 
>>> "Mutex._m" (count: 15 / 30).
314 const OSDMap *osdmap = objecter->get_osdmap_read();
315 object_locator_t oloc = osdmap->file_to_object_locator(pr->layout);
316 objecter->put_osdmap_read();
317 objecter->remove(oid, oloc, pr->snapc, pr->mtime, pr->flags,
318  NULL, new C_PurgeRange(this, pr));
319 pr->uncommitted++;
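
The ORDER_REVERSAL checker is pointing at inconsistent lock nesting: the
osdmap read lock is taken here while a mutex is already held, and other
paths take the two in the opposite order.  A generic sketch of the usual
remedy, picking one global order and following it on every path
(illustrative only, not the actual fix):

  #include <mutex>

  std::mutex osdmap_lock;  // stands in for the Objecter rwlock ("RWLock.L")
  std::mutex filer_lock;   // stands in for the Filer mutex ("Mutex._m")

  void purge_range_step_sketch() {
    // One fixed order everywhere avoids the reported reversal.
    std::lock_guard<std::mutex> map_guard(osdmap_lock);
    std::lock_guard<std::mutex> filer_guard(filer_lock);
    // ... look up the object locator, then submit the remove op ...
  }

  int main() {
    purge_range_step_sketch();
    return 0;
  }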



To view the defects in Coverity Scan visit, 
http://scan.coverity.com/projects/25?tab=overview

To unsubscribe from the email notification for new defects, 
http://scan5.coverity.com/cgi-bin/unsubscribe.py



--- End Message ---


New Defects reported by Coverity Scan for ceph (fwd)

2014-09-16 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 20 of 38 defect(s)


** CID 717233:  Uninitialized scalar field  (UNINIT_CTOR)
/mds/Capability.h: 249 in Capability::Capability(CInode *, unsigned long, 
client_t)()

** CID 1238869:  Value not atomically updated  (ATOMICITY)
/osdc/Objecter.cc: 3055 in Objecter::handle_pool_op_reply(MPoolOpReply *)()
/osdc/Objecter.cc: 3055 in Objecter::handle_pool_op_reply(MPoolOpReply *)()
/osdc/Objecter.cc: 3055 in Objecter::handle_pool_op_reply(MPoolOpReply *)()

** CID 1238870:  Unchecked return value  (CHECKED_RETURN)
/test/test_snap_mapper.cc: 562 in MapperVerifier::remove_oid()()

** CID 1238871:  Dereference after null check  (FORWARD_NULL)
/mds/Server.cc: 6988 in Server::do_rename_rollback(ceph::buffer::list &, int, 
std::tr1::shared_ptr &, bool)()
/mds/Server.cc: 7107 in Server::do_rename_rollback(ceph::buffer::list &, int, 
std::tr1::shared_ptr &, bool)()

** CID 1238872:  Unchecked return value  (CHECKED_RETURN)
/tools/ceph_objectstore_tool.cc: 1284 in 
do_import_rados(std::basic_string, 
std::allocator>)()

** CID 1238873:  Unchecked return value  (CHECKED_RETURN)
/rbd_replay/Replayer.cc: 154 in rbd_replay::Replayer::run(const 
std::basic_string, std::allocator>&)()

** CID 1238874:  Missing unlock  (LOCK)
/osdc/Objecter.cc: 1855 in Objecter::op_cancel(Objecter::OSDSession *, unsigned 
long, int)()

** CID 1238875:  Unrecoverable parse warning  (PARSE_ERROR)
/client/Client.cc: 7737 in ()

** CID 1238876:  Unrecoverable parse warning  (PARSE_ERROR)
/client/Client.cc: 7735 in ()

** CID 1238877:  Missing unlock  (LOCK)
/common/Timer.cc: 240 in RWTimer::shutdown()()

** CID 1238878:  Unrecoverable parse warning  (PARSE_ERROR)
/client/Client.cc: 7734 in ()

** CID 1238879:  Thread deadlock  (ORDER_REVERSAL)


** CID 1238880:  Thread deadlock  (ORDER_REVERSAL)



** CID 1238881:  Thread deadlock  (ORDER_REVERSAL)



** CID 1238882:  Thread deadlock  (ORDER_REVERSAL)


** CID 1238883:  Improper use of negative value  (NEGATIVE_RETURNS)
/mds/MDS.cc: 962 in MDS::handle_mds_map(MMDSMap *)()

** CID 1238884:  Unrecoverable parse warning  (PARSE_ERROR)
/client/Client.cc: 7733 in ()

** CID 1238885:  Thread deadlock  (ORDER_REVERSAL)


** CID 1238886:  Thread deadlock  (ORDER_REVERSAL)


** CID 1238887:  Thread deadlock  (ORDER_REVERSAL)




*** CID 717233:  Uninitialized scalar field  (UNINIT_CTOR)
/mds/Capability.h: 249 in Capability::Capability(CInode *, unsigned long, 
client_t)()
243 suppress(0), state(0),
244 client_follows(0), client_xattr_version(0),
245 client_inline_version(0),
246 item_session_caps(this), item_snaprealm_caps(this), 
item_revoking_caps(this) {
247 g_num_cap++;
248 g_num_capa++;
>>> CID 717233:  Uninitialized scalar field  (UNINIT_CTOR)
>>> Non-static class member "num_revoke_warnings" is not initialized in 
>>> this constructor nor in any functions that it calls.
249   }
250   ~Capability() {
251 g_num_cap--;
252 g_num_caps++;
253   }
254 
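
UNINIT_CTOR reports are normally addressed by adding the member to the
constructor's initializer list; in sketch form (member names taken from the
report, everything else invented for illustration):

  struct CapabilitySketch {
    int suppress;
    unsigned num_revoke_warnings;

    CapabilitySketch()
      : suppress(0),
        num_revoke_warnings(0)  // the member the checker flagged
    {}
  };

  int main() {
    CapabilitySketch c;
    return static_cast<int>(c.num_revoke_warnings);  // 0
  }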


*** CID 1238869:  Value not atomically updated  (ATOMICITY)
/osdc/Objecter.cc: 3055 in Objecter::handle_pool_op_reply(MPoolOpReply *)()
3049 if (!rwlock.is_wlocked()) {
3050   rwlock.unlock();
3051   rwlock.get_write();
3052 }
3053 iter = pool_ops.find(tid);
3054 if (iter != pool_ops.end()) {
>>> CID 1238869:  Value not atomically updated  (ATOMICITY)
>>> Using an unreliable value of "op" inside the second locked section. If 
>>> the data that "op" depends on was changed by another thread, this use might 
>>> be incorrect.
3055   _finish_pool_op(op);
3056 }
3057   } else {
3058 ldout(cct, 10) << "unknown request " << tid << dendl;
3059   }
3060   rwlock.unlock();
/osdc/Objecter.cc: 3055 in Objecter::handle_pool_op_reply(MPoolOpReply *)()
3049 if (!rwlock.is_wlocked()) {
3050   rwlock.unlock();
3051   rwlock.get_write();
3052 }
3053 iter = pool_ops.find(tid);
3054 if (iter != pool_ops.end()) {
>>> CID 1238869:  Value not atomically updated  (ATOMICITY)
>>> Using an unreliable value of "op" inside the second locked section. If 
>>> the data that "op" depends on was changed by another thread, this use might 
>>> be incorrect.
3055   _finish_pool_op(op);
3056 }
3057   } else {
3058 ldout(cct, 10) << "unknown request " << tid << dendl;
3059   }
3060   rwlock.unlock();
/osdc/Objecter.cc: 3055 in Objecter::handle_pool_op_reply(MPoolOpReply *)()
3049 if (!rwlock.is_wlocked()) {
3050   rwlock.unlock();
3051   
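
The ATOMICITY pattern here is a value computed under one critical section
being reused after the lock was dropped and re-taken; the usual remedy is to
re-derive it inside the new critical section.  A generic sketch with
stand-in types, not the Objecter code:

  #include <map>
  #include <mutex>

  std::mutex lock;
  std::map<int, int> pool_ops;  // tid -> op state (stand-in for PoolOp*)

  void handle_pool_op_reply_sketch(int tid) {
    std::unique_lock<std::mutex> l(lock);
    std::map<int, int>::iterator iter = pool_ops.find(tid);
    if (iter == pool_ops.end())
      return;
    l.unlock();                 // lock dropped: another thread may intervene
    l.lock();
    iter = pool_ops.find(tid);  // re-validate instead of reusing the old value
    if (iter != pool_ops.end())
      pool_ops.erase(iter);
  }

  int main() {
    pool_ops[1] = 42;
    handle_pool_op_reply_sketch(1);
    return pool_ops.empty() ? 0 : 1;
  }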

New Defects reported by Coverity Scan for ceph (fwd)

2014-08-23 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 9 of 9 defect(s)


** CID 1232599:  Bad bit shift operation  (BAD_SHIFT)
/os/HashIndex.cc: 433 in HashIndex::pre_split_folder(unsigned int, unsigned 
long)()

** CID 1232600:  Bad bit shift operation  (BAD_SHIFT)
/os/HashIndex.cc: 421 in HashIndex::pre_split_folder(unsigned int, unsigned 
long)()

** CID 1232601:  Unchecked return value  (CHECKED_RETURN)
/rbd_replay/Replayer.cc: 154 in 
rbd_replay::Replayer::run(std::basic_string, 
std::allocator>)()

** CID 1232602:  Division or modulo by zero  (DIVIDE_BY_ZERO)
/mon/OSDMonitor.cc: 490 in OSDMonitor::reweight_by_utilization(int, 
std::basic_string, std::allocator>&, bool, 
const std::set, std::allocator> *)()

** CID 1232603:  Unintentional integer overflow  (OVERFLOW_BEFORE_WIDEN)
/test/objectstore/store_test.cc: 138 in 
StoreTest_SimpleColPreHashTest_Test::TestBody()()

** CID 1232604:  Unintentional integer overflow  (OVERFLOW_BEFORE_WIDEN)
/os/HashIndex.cc: 378 in HashIndex::pre_split_folder(unsigned int, unsigned 
long)()

** CID 1232605:  Uncaught exception  (UNCAUGHT_EXCEPT)
/rbd_replay/rbd-replay.cc: 51 in main()

** CID 1232606:  Uncaught exception  (UNCAUGHT_EXCEPT)
/rbd_replay/rbd-replay.cc: 51 in main()
/rbd_replay/rbd-replay.cc: 51 in main()
/rbd_replay/rbd-replay.cc: 51 in main()
/rbd_replay/rbd-replay.cc: 51 in main()
/rbd_replay/rbd-replay.cc: 51 in main()
/rbd_replay/rbd-replay.cc: 51 in main()

** CID 1232607:  Uninitialized pointer field  (UNINIT_CTOR)
/rbd_replay/Replayer.cc: 141 in rbd_replay::Replayer::Replayer(int)()



*** CID 1232599:  Bad bit shift operation  (BAD_SHIFT)
/os/HashIndex.cc: 433 in HashIndex::pre_split_folder(unsigned int, unsigned 
long)()
427   leavies /= subs;
428   while (leavies > 1) {
429 ++level;
430 leavies = leavies >> 4;
431   }
432   for (uint32_t i = 0; i < subs; ++i) {
>>> CID 1232599:  Bad bit shift operation  (BAD_SHIFT)
>>> In expression "i << (4 - split_bits) % 4", shifting by a negative 
>>> amount has undefined behavior.  The shift amount, "(4 - split_bits) % 4", 
>>> is -1.
433 int v = tmp_id | (i << ((4 - split_bits) % 4));
434 paths.push_back(to_hex(v));
435 ret = create_path(paths);
436 if (ret < 0 && ret != -EEXIST)
437   return ret;
438 ret = recursive_create_path(paths, level);


*** CID 1232600:  Bad bit shift operation  (BAD_SHIFT)
/os/HashIndex.cc: 421 in HashIndex::pre_split_folder(unsigned int, unsigned 
long)()
415   // this variable denotes how many bits (for this level) that can be
416   // used for sub folder splitting
417   int split_bits = 4 - left_bits;
418   // the below logic is inspired by rados.h#ceph_stable_mod,
419   // it basically determines how many sub-folders should we
420   // create for splitting
>>> CID 1232600:  Bad bit shift operation  (BAD_SHIFT)
>>> In expression "1 << pg_num_bits - 1", shifting by a negative amount has 
>>> undefined behavior.  The shift amount, "pg_num_bits - 1", is -1.
421   if (((1 << (pg_num_bits - 1)) | ps) >= pg_num) {
422 ++split_bits;
423   }
424   const uint32_t subs = (1 << split_bits);
425   // Calculate how many levels we create starting from here
426   int level  = 0;
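
Both BAD_SHIFT reports come down to the same thing: the shift amount can
become -1 for some pg_num / split_bits combinations, and shifting by a
negative amount is undefined behavior.  A minimal sketch of the kind of
guard that satisfies the checker (illustrative, not the HashIndex fix):

  #include <cassert>
  #include <stdint.h>

  uint32_t sub_folder_count_sketch(int split_bits) {
    assert(split_bits >= 0 && split_bits < 32);  // rejects the -1 case
    return static_cast<uint32_t>(1) << split_bits;
  }

  int main() {
    return sub_folder_count_sketch(3) == 8 ? 0 : 1;
  }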


*** CID 1232601:  Unchecked return value  (CHECKED_RETURN)
/rbd_replay/Replayer.cc: 154 in 
rbd_replay::Replayer::run(std::basic_string, 
std::allocator>)()
148   return m_action_trackers[id % m_num_action_trackers];
149 }
150 
151 void Replayer::run(const std::string replay_file) {
152   {
153 librados::Rados rados;
>>> CID 1232601:  Unchecked return value  (CHECKED_RETURN)
>>> Calling "init" without checking return value (as is done elsewhere 10 
>>> out of 11 times).
154 rados.init(NULL);
155 int r = rados.init_with_context(g_ceph_context);
156 if (r) {
157   cerr << "Unable to read conf file: " << r << std::endl;
158   goto out;
159 }
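
CHECKED_RETURN findings are usually resolved by treating the call like the
other sites Coverity counted: inspect the return value and bail out on
error.  A hedged sketch for this one (the error handling shown is
illustrative only):

  #include <iostream>
  #include <rados/librados.hpp>

  int replayer_init_sketch(librados::Rados &rados) {
    int r = rados.init(NULL);  // return value no longer ignored
    if (r < 0) {
      std::cerr << "rados.init failed: " << r << std::endl;
      return r;
    }
    return 0;
  }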


*** CID 1232602:  Division or modulo by zero  (DIVIDE_BY_ZERO)
/mon/OSDMonitor.cc: 490 in OSDMonitor::reweight_by_utilization(int, 
std::basic_string, std::allocator>&, bool, 
const std::set, std::allocator> *)()
484 }
485 ++pgs_by_osd[*q];
486 ++num_pg_copies;
487   }
488 }
489 
>>> CID 1232602:  Division or modulo by zero  (DIVIDE_BY_ZERO)
>>> In expression "num_p

New Defects reported by Coverity Scan for ceph (fwd)

2014-07-10 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 5 of 5 defect(s)


** CID 1201388:  Missing unlock  (LOCK)
/mon/Monitor.cc: 438 in Monitor::preinit()()

** CID 1225099:  Unchecked return value  (CHECKED_RETURN)
/mon/MDSMonitor.cc: 1409 in MDSMonitor::filesystem_command(MMonCommand *, const 
std::basic_string, std::allocator>&, 
std::map, std::allocator>, 
boost::variant, 
std::allocator>, bool, long, double, std::vector, std::allocator>, 
std::allocator, 
std::allocator>>>, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_>, std::less, std::allocator>>, 
std::allocator, 
std::allocator>, boost::variant, std::allocator>, bool, long, double, 
std::vector, 
std::allocator>, std::allocator, std::allocator>>>, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_ &, 
std::basic_stringstream, std::allocator> &, int &)()

** CID 1225100:  Missing unlock  (LOCK)
/mds/MDLog.cc: 511 in MDLog::trim(int)()
/mds/MDLog.cc: 511 in MDLog::trim(int)()

** CID 1225101:  Missing unlock  (LOCK)
/mds/MDLog.cc: 394 in MDLog::shutdown()()

** CID 1225102:  Dereference before null check  (REVERSE_INULL)
/mon/Monitor.cc: 746 in Monitor::shutdown()()



*** CID 1201388:  Missing unlock  (LOCK)
/mon/Monitor.cc: 438 in Monitor::preinit()()
432 
433   dout(1) << "preinit fsid " << monmap->fsid << dendl;
434 
435   int r = sanitize_options();
436   if (r < 0) {
437 derr << "option sanitization failed!" << dendl;
>>> CID 1201388:  Missing unlock  (LOCK)
>>> Returning without unlocking "this->lock._m".
438 return r;
439   }
440 
441   assert(!logger);
442   {
443 PerfCountersBuilder pcb(g_ceph_context, "mon", l_mon_first, l_mon_last);
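
Missing-unlock reports disappear once the lock is scope-bound, so every
early return releases it.  A generic sketch (std::mutex only to keep it
self-contained; the tree uses its own Mutex/Locker types):

  #include <mutex>

  std::mutex lock;

  int preinit_sketch(int sanitize_result) {
    std::lock_guard<std::mutex> l(lock);  // released on every return path
    if (sanitize_result < 0)
      return sanitize_result;             // early return no longer leaks the lock
    return 0;
  }

  int main() {
    return preinit_sketch(0);
  }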


*** CID 1225099:  Unchecked return value  (CHECKED_RETURN)
/mon/MDSMonitor.cc: 1409 in MDSMonitor::filesystem_command(MMonCommand *, const 
std::basic_string, std::allocator>&, 
std::map, std::allocator>, 
boost::variant, 
std::allocator>, bool, long, double, std::vector, std::allocator>, 
std::allocator, 
std::allocator>>>, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_>, std::less, std::allocator>>, 
std::allocator, 
std::allocator>, boost::variant, std::allocator>, bool, long, double, 
std::vector, 
std::allocator>, std::allocator, std::allocator>>>, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_, 
boost::detail::variant::void_, boost::detail::variant::void_ &, 
std::basic_stringstream, std::allocator> &, int &)()
1403   r = -EINVAL;
1404   poolid = -1;
1405   ss << "cannot remove default data pool";
1406 }
1407 
1408 if (poolid >= 0) {
>>> CID 1225099:  Unchecked return value  (CHECKED_RETURN)
>>> Calling "cmd_getval" without checking return value (as is done 
>>> elsewhere 22 out of 25 times).
1409   cmd_getval(g_ceph_context, cmdmap, "poolid", poolid);
1410   r = pending_mdsmap.remove_data_pool(poolid);
1411   if (r == -ENOENT)
1412     r = 0;
1413   if (r == 0)
1414     ss << "removed data pool " << poolid << " from mdsmap";

New Defects reported by Coverity Scan for ceph (fwd)

2014-06-20 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 1 of 1 defect(s)


** CID 1223276:  Division or modulo by zero  (DIVIDE_BY_ZERO)
/tools/rados/rados.cc: 1368 in rados_tool_common(const 
std::map, std::allocator>, 
std::basic_string, std::allocator>, 
std::less, 
std::allocator>>, std::allocator, std::allocator>, std::basic_string, std::allocator &, std::vector> &)()



*** CID 1223276:  Division or modulo by zero  (DIVIDE_BY_ZERO)
/tools/rados/rados.cc: 1368 in rados_tool_common(const 
std::map, std::allocator>, 
std::basic_string, std::allocator>, 
std::less, 
std::allocator>>, std::allocator, std::allocator>, std::basic_string, std::allocator &, std::vector> &)()
1362   }
1363 
1364   // align op_size
1365   if (io_ctx.pool_requires_alignment()) {
1366 const uint64_t align = io_ctx.pool_required_alignment();
1367 const bool wrn = (op_size != (1<<22));
>>> CID 1223276:  Division or modulo by zero  (DIVIDE_BY_ZERO)
>>> In expression "(op_size + align - 1UL) / align", division by expression 
>>> "align" which may be zero has undefined behavior.
1368 op_size = uint64_t((op_size + align - 1) / align) * align;
1369 if (wrn)
1370   cerr << "INFO: op_size has been rounded to " << op_size << std::endl;
1371   }
1372 
1373   // snapname?
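
The complaint is that pool_required_alignment() could, as far as the checker
knows, return 0.  A minimal sketch of the guard (illustrative only):

  #include <stdint.h>

  uint64_t align_op_size_sketch(uint64_t op_size, uint64_t align) {
    if (align == 0)                  // nothing to round to
      return op_size;
    return (op_size + align - 1) / align * align;
  }

  int main() {
    return (align_op_size_sketch(100, 0) == 100 &&
            align_op_size_sketch(100, 64) == 128) ? 0 : 1;
  }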



To view the defects in Coverity Scan visit, 
http://scan.coverity.com/projects/25?tab=overview

To unsubscribe from the email notification for new defects, 
http://scan5.coverity.com/cgi-bin/unsubscribe.py



--- End Message ---


Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-06-18 Thread Sebastien Ponce
I've now looked at the defects, and in my opinion they are mostly
irrelevant. There are 2 cases:
  - the 2 "division by zero" reports will never happen because we will never
run the test with a zero parameter. Still, I've added an ASSERT so they are
no longer reported.
  - all the rest is a clash between gtest and coverity: whenever you
allocate something via the C API and there is an assert between the
allocation and the deallocation, you have a potential resource leak if the
assert fails. However, avoiding that means wrapping all of the C API in a
kind of autoPtr interface (a sketch of such a wrapper is below), and I do
not believe it's worth it for test code. By the way, the rados tests must
have the same leaks a priori, and I suppose those are ignored.
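
For reference, the "kind of autoPtr interface" mentioned above could look
like the guard below; the class name and shape are my own sketch, nothing
like it is implied to exist in the tree:

  #include <rados/librados.h>

  // RAII guard around a librados C io context so that a failing gtest
  // ASSERT (which returns early) still releases the handle.
  class ScopedIoctx {
  public:
    explicit ScopedIoctx(rados_ioctx_t io) : io_(io) {}
    ~ScopedIoctx() {
      if (io_)
        rados_ioctx_destroy(io_);
    }
    rados_ioctx_t get() const { return io_; }
  private:
    rados_ioctx_t io_;
    ScopedIoctx(const ScopedIoctx &);             // non-copyable
    ScopedIoctx &operator=(const ScopedIoctx &);
  };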

So is it OK to commit the 2 new asserts and ignore the other defects?

Cheers,

Sebastien

On Sat, 2014-06-07 at 09:12 -0700, Sage Weil wrote:
> Mostly stuff in the new libradosstriper code, it looks like.


--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-06-08 Thread Sebastien Ponce
Not surprising: this code was just integrated and I did not have a chance
to run a Coverity scan on it myself.
I'll fix all of this.

Sebastien

On Sat, 2014-06-07 at 09:12 -0700, Sage Weil wrote:
> Mostly stuff in the new libradosstriper code, it looks like.


--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


New Defects reported by Coverity Scan for ceph (fwd)

2014-06-07 Thread Sage Weil
Mostly stuff in the new libradosstriper code, it looks like.
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 20 of 41 defect(s)


** CID 1221498:  Division or modulo by zero  (DIVIDE_BY_ZERO)
/test/libradosstriper/striping.cc: 59 in 
StriperTestRT::checkObjectFromRados(const std::basic_string, std::allocator>&, ceph::buffer::list &, unsigned 
long, unsigned long, unsigned long, unsigned long, unsigned long)()

** CID 1221499:  Division or modulo by zero  (DIVIDE_BY_ZERO)
/test/libradosstriper/striping.cc: 68 in 
StriperTestRT::checkObjectFromRados(const std::basic_string, std::allocator>&, ceph::buffer::list &, unsigned 
long, unsigned long, unsigned long, unsigned long, unsigned long)()

** CID 1221500:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/io.cc: 341 in StriperTest_XattrIter_Test::TestBody()()
/test/libradosstriper/io.cc: 335 in StriperTest_XattrIter_Test::TestBody()()

** CID 1221501:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 513 in 
StriperTestPP_RoundTripWriteFullPP_Test::TestBody()()

** CID 1221502:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 537 in 
StriperTestPP_RoundTripWriteFullPP_Test::TestBody()()
/test/libradosstriper/aio.cc: 538 in 
StriperTestPP_RoundTripWriteFullPP_Test::TestBody()()
/test/libradosstriper/aio.cc: 532 in 
StriperTestPP_RoundTripWriteFullPP_Test::TestBody()()

** CID 1221503:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 440 in StriperTest_Flush_Test::TestBody()()
/test/libradosstriper/aio.cc: 435 in StriperTest_Flush_Test::TestBody()()
/test/libradosstriper/aio.cc: 433 in StriperTest_Flush_Test::TestBody()()

** CID 1221504:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 428 in StriperTest_Flush_Test::TestBody()()
/test/libradosstriper/aio.cc: 433 in StriperTest_Flush_Test::TestBody()()
/test/libradosstriper/aio.cc: 435 in StriperTest_Flush_Test::TestBody()()
/test/libradosstriper/aio.cc: 440 in StriperTest_Flush_Test::TestBody()()
/test/libradosstriper/aio.cc: 424 in StriperTest_Flush_Test::TestBody()()

** CID 1221505:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 306 in StriperTest_IsSafe_Test::TestBody()()
/test/libradosstriper/aio.cc: 301 in StriperTest_IsSafe_Test::TestBody()()
/test/libradosstriper/aio.cc: 299 in StriperTest_IsSafe_Test::TestBody()()

** CID 1221506:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 299 in StriperTest_IsSafe_Test::TestBody()()
/test/libradosstriper/aio.cc: 301 in StriperTest_IsSafe_Test::TestBody()()
/test/libradosstriper/aio.cc: 306 in StriperTest_IsSafe_Test::TestBody()()
/test/libradosstriper/aio.cc: 285 in StriperTest_IsSafe_Test::TestBody()()
/test/libradosstriper/aio.cc: 281 in StriperTest_IsSafe_Test::TestBody()()

** CID 1221507:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 453 in StriperTestPP_FlushPP_Test::TestBody()()
/test/libradosstriper/aio.cc: 458 in StriperTestPP_FlushPP_Test::TestBody()()
/test/libradosstriper/aio.cc: 463 in StriperTestPP_FlushPP_Test::TestBody()()

** CID 1221508:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 463 in StriperTestPP_FlushPP_Test::TestBody()()
/test/libradosstriper/aio.cc: 458 in StriperTestPP_FlushPP_Test::TestBody()()

** CID 1221509:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 124 in StriperTest_RoundTrip_Test::TestBody()()
/test/libradosstriper/aio.cc: 119 in StriperTest_RoundTrip_Test::TestBody()()
/test/libradosstriper/aio.cc: 117 in StriperTest_RoundTrip_Test::TestBody()()

** CID 1221510:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 108 in StriperTest_RoundTrip_Test::TestBody()()
/test/libradosstriper/aio.cc: 117 in StriperTest_RoundTrip_Test::TestBody()()
/test/libradosstriper/aio.cc: 119 in StriperTest_RoundTrip_Test::TestBody()()
/test/libradosstriper/aio.cc: 124 in StriperTest_RoundTrip_Test::TestBody()()
/test/libradosstriper/aio.cc: 104 in StriperTest_RoundTrip_Test::TestBody()()

** CID 1221511:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 333 in StriperTestPP_IsSafePP_Test::TestBody()()
/test/libradosstriper/aio.cc: 338 in StriperTestPP_IsSafePP_Test::TestBody()()
/test/libradosstriper/aio.cc: 319 in StriperTestPP_IsSafePP_Test::TestBody()()

** CID 1221512:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 338 in StriperTestPP_IsSafePP_Test::TestBody()()
/test/libradosstriper/aio.cc: 333 in StriperTestPP_IsSafePP_Test::TestBody()()

** CID 1221513:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 240 in StriperTest_IsComplete_Test::TestBody()()
/test/libradosstriper/aio.cc: 229 in StriperTest_IsComplete_Test::TestBody()()
/test/libradosstriper/aio.cc: 227 in StriperTest_IsComplete_Test::TestBody()()

** CID 1221514:  Resource leak  (RESOURCE_LEAK)
/test/libradosstriper/aio.cc: 218 in StriperTest_IsComplete_Test::TestBody()()
/test/li

New Defects reported by Coverity Scan for ceph (fwd)

2014-06-06 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 1 of 1 defect(s)


** CID 1220854:  Invalid iterator comparison  (MISMATCHED_ITERATOR)
/osd/PG.cc: 981 in 
PG::calc_ec_acting(std::_Rb_tree_const_iterator>, unsigned int, const std::vector> &, 
pg_shard_t, const std::vector> &, pg_shard_t, const 
std::map, 
std::allocator>> &, bool, 
std::vector> *, std::set, std::allocator> *, std::set, std::allocator> *, pg_shard_t *, 
std::basic_ostream>&)()



*** CID 1220854:  Invalid iterator comparison  (MISMATCHED_ITERATOR)
/osd/PG.cc: 981 in 
PG::calc_ec_acting(std::_Rb_tree_const_iterator>, unsigned int, const std::vector> &, 
pg_shard_t, const std::vector> &, pg_shard_t, const 
std::map, 
std::allocator>> &, bool, 
std::vector> *, std::set, std::allocator> *, std::set, std::allocator> *, pg_shard_t *, 
std::basic_ostream>&)()
975 all_info.find(pg_shard_t(acting[i], shard_id_t(i)))->second.last_update >=
976 auth_log_shard->second.log_tail) {
977   ss << " selecting acting[i]: " << pg_shard_t(acting[i], 
shard_id_t(i)) << std::endl;
978   want[i] = acting[i];
979   ++usable;
980 } else {
>>> CID 1220854:  Invalid iterator comparison  (MISMATCHED_ITERATOR)
>>> Comparing "j" from "all_info_by_shard[shard_id_t(i)]" to 
>>> "all_info_by_shard[shard_id_t(i)]->end()" from 
>>> "all_info_by_shard[shard_id_t(i)]".
981   for (set::iterator j = all_info_by_shard[shard_id_t(i)].begin();
982j != all_info_by_shard[shard_id_t(i)].end();
983++j) {
984 assert(j->shard == i);
985 if (!all_info.find(*j)->second.is_incomplete() &&
986 all_info.find(*j)->second.last_update >=



To view the defects in Coverity Scan visit, 
http://scan.coverity.com/projects/25?tab=overview

To unsubscribe from the email notification for new defects, 
http://scan5.coverity.com/cgi-bin/unsubscribe.py



--- End Message ---


New Defects reported by Coverity Scan for ceph (fwd)

2014-05-20 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 1 of 1 defect(s)


** CID 1214678:  Unchecked return value  (CHECKED_RETURN)
/osd/OSD.cc: 318 in OSDService::_maybe_split_pgid(std::tr1::shared_ptr, std::tr1::shared_ptr, spg_t)()



*** CID 1214678:  Unchecked return value  (CHECKED_RETURN)
/osd/OSD.cc: 318 in OSDService::_maybe_split_pgid(std::tr1::shared_ptr, std::tr1::shared_ptr, spg_t)()
312   OSDMapRef new_map,
313   spg_t pgid)
314 {
315   assert(old_map->have_pg_pool(pgid.pool()));
316   if (pgid.ps() < 
static_cast(old_map->get_pg_num(pgid.pool( {
317 set children;
>>> CID 1214678:  Unchecked return value  (CHECKED_RETURN)
>>> No check of the return value of 
>>> "pgid.is_split(old_map->get_pg_num(pgid.pool()), 
>>> new_map->get_pg_num(pgid.pool()), &children)".
318 pgid.is_split(old_map->get_pg_num(pgid.pool()),
319   new_map->get_pg_num(pgid.pool()), &children);
320 _start_split(pgid, children);
321   } else {
322 assert(pgid.ps() < 
static_cast(new_map->get_pg_num(pgid.pool(;
323   }



To view the defects in Coverity Scan visit, 
http://scan.coverity.com/projects/25?tab=overview

To unsubscribe from the email notification for new defects, 
http://scan5.coverity.com/cgi-bin/unsubscribe.py



--- End Message ---


New Defects reported by Coverity Scan for ceph (fwd)

2014-05-10 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 8 of 8 defect(s)


** CID 717008:  Dereference null return value  (NULL_RETURNS)


** CID 1128417:  Dereference null return value  (NULL_RETURNS)
/mds/MDCache.cc: 9986 in MDCache::handle_discover_reply(MDiscoverReply *)()

** CID 1213580:  Dereference null return value  (NULL_RETURNS)
/osd/OSD.cc: 5150 in OSD::dispatch_op_fast(std::tr1::shared_ptr, 
std::tr1::shared_ptr)()

** CID 1213581:  Dereference null return value  (NULL_RETURNS)
/osd/OSD.cc: 4944 in OSD::ms_fast_preprocess(Message *)()

** CID 1213582:  Dereference null return value  (NULL_RETURNS)
/osd/Watch.cc: 359 in Watch::discard_state()()

** CID 1213583:  Dereference null return value  (NULL_RETURNS)
/osd/Watch.cc: 321 in Watch::connect(boost::intrusive_ptr)()

** CID 1213584:  Thread deadlock  (ORDER_REVERSAL)


** CID 1213585:  Thread deadlock  (ORDER_REVERSAL)




*** CID 717008:  Dereference null return value  (NULL_RETURNS)
/osd/ReplicatedPG.cc: 4962 in 
ReplicatedPG::do_osd_op_effects(ReplicatedPG::OpContext *)()
4956 
4957 void ReplicatedPG::do_osd_op_effects(OpContext *ctx)
4958 {
4959   ConnectionRef conn(ctx->op->get_req()->get_connection());
4960   boost::intrusive_ptr session(
4961 (OSD::Session *)conn->get_priv());
>>> CID 717008:  Dereference null return value  (NULL_RETURNS)
>>> Dereferencing a pointer that might be null "session" when calling 
>>> "RefCountedObject::put()".
4962   session->put();  // get_priv() takes a ref, and so does the intrusive_ptr
4963   entity_name_t entity = ctx->reqid.name;
4964 
4965   dout(15) << "do_osd_op_effects on session " << session.get() << dendl;
4966 
4967   for (list::iterator i = ctx->watch_connects.begin();


*** CID 1128417:  Dereference null return value  (NULL_RETURNS)
/mds/MDCache.cc: 9986 in MDCache::handle_discover_reply(MDiscoverReply *)()
9980 } else {
9981   // note: this can only happen our first way around this loop.
9982   if (p.end() && m->is_flag_error_dn()) {
9983     fg = cur->pick_dirfrag(m->get_error_dentry());
9984     curdir = cur->get_dirfrag(fg);
9985   } else
>>> CID 1128417:  Dereference null return value  (NULL_RETURNS)
>>> Assigning: "curdir" = null return value from 
>>> "CInode::get_dirfrag(frag_t)".
9986     curdir = cur->get_dirfrag(m->get_base_dir_frag());
9987 }
9988 
9989 if (p.end())
9990   break;
9991 


*** CID 1213580:  Dereference null return value  (NULL_RETURNS)
/osd/OSD.cc: 5150 in OSD::dispatch_op_fast(std::tr1::shared_ptr, 
std::tr1::shared_ptr)()
5144 // we're shutting down, so drop the op
5145 return true;
5146   }
5147 
5148   epoch_t msg_epoch(op_required_epoch(op));
5149   if (msg_epoch > osdmap->get_epoch()) {
>>> CID 1213580:  Dereference null return value  (NULL_RETURNS)
>>> Assigning: "s" = null return value from "Connection::get_priv()".
5150 Session *s = static_cast(op->get_req()->
5151   get_connection()->get_priv());
5152 s->received_map_lock.Lock();
5153 epoch_t received_epoch = s->received_map_epoch;
5154 s->received_map_lock.Unlock();
5155 if (received_epoch < msg_epoch) {


*** CID 1213581:  Dereference null return value  (NULL_RETURNS)
/osd/OSD.cc: 4944 in OSD::ms_fast_preprocess(Message *)()
4938 
4939 void OSD::ms_fast_preprocess(Message *m)
4940 {
4941   if (m->get_connection()->get_peer_type() == CEPH_ENTITY_TYPE_OSD) {
4942 if (m->get_type() == CEPH_MSG_OSD_MAP) {
4943   MOSDMap *mm = static_cast(m);
>>> CID 1213581:  Dereference null return value  (NULL_RETURNS)
>>> Assigning: "s" = null return value from "Connection::get_priv()".
4944   Session *s = static_cast(m->get_connection()->get_priv());
4945   s->received_map_lock.Lock();
4946   s->received_map_epoch = mm->get_last();
4947   s->received_map_lock.Unlock();
4948   s->put();
4949 }
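
These NULL_RETURNS reports all have the same shape: get_priv() may return
NULL and the result is dereferenced immediately.  In sketch form (stand-in
types, not the OSD code):

  #include <cstddef>

  struct SessionSketch { int received_map_epoch; };

  SessionSketch *get_priv_stub() { return NULL; }  // may legitimately be NULL

  int fast_preprocess_sketch() {
    SessionSketch *s = get_priv_stub();
    if (!s)                    // bail out instead of dereferencing NULL
      return -1;
    s->received_map_epoch = 42;
    return 0;
  }

  int main() {
    return fast_preprocess_sketch() == -1 ? 0 : 1;
  }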


*** CID 1213582:  Dereference null return value  (NULL_RETURNS)
/osd/Watch.cc: 359 in Watch::discard_state()()
353   assert(!discarded);
354   assert(obc);
355   in_progress_notifies.clear();
356   unregister_cb();
357   discarded

New Defects reported by Coverity Scan for ceph (fwd)

2014-04-22 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 4 of 4 defect(s)


** CID 1204293:  Buffer not null terminated  (BUFFER_SIZE_WARNING)
/mds/MDS.cc: 1714 in MDS::respawn()()

** CID 1204294:  Resource leak  (RESOURCE_LEAK)
/osd/ReplicatedPG.cc: 980 in 
ReplicatedPG::do_pg_op(std::tr1::shared_ptr)()

** CID 1204295:  Uninitialized scalar field  (UNINIT_CTOR)
/osd/osd_types.h: 2716 in ObjectContext::RWState::RWState()()

** CID 1204296:  Uninitialized scalar field  (UNINIT_CTOR)
/osdc/Objecter.h: 1165 in Objecter::Op::Op(const object_t &, const 
object_locator_t &, std::vector> &, int, Context 
*, Context *, unsigned long *)()
/osdc/Objecter.h: 1165 in Objecter::Op::Op(const object_t &, const 
object_locator_t &, std::vector> &, int, Context 
*, Context *, unsigned long *)()



*** CID 1204293:  Buffer not null terminated  (BUFFER_SIZE_WARNING)
/mds/MDS.cc: 1714 in MDS::respawn()()
1708 char buf[PATH_MAX];
1709 char *cwd = getcwd(buf, sizeof(buf));
1710 assert(cwd);
1711 dout(1) << " cwd " << cwd << dendl;
1712 
1713 /* Fall back to a best-effort: just running in our CWD */
>>> CID 1204293:  Buffer not null terminated  (BUFFER_SIZE_WARNING)
>>> Calling strncpy with a maximum size argument of 4096 bytes on 
>>> destination array "exe_path" of size 4096 bytes might leave the destination 
>>> string unterminated.
1714 strncpy(exe_path, orig_argv[0], sizeof(exe_path));
1715   }
1716 
1717   dout(1) << " exe_path " << exe_path << dendl;
1718 
1719   unblock_all_signals(NULL);
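
The standard remedy for this BUFFER_SIZE_WARNING is to copy one byte less
than the destination and terminate explicitly; a minimal standalone sketch:

  #include <climits>
  #include <cstdio>
  #include <cstring>

  #ifndef PATH_MAX
  #define PATH_MAX 4096
  #endif

  int main(int argc, char *argv[]) {
    char exe_path[PATH_MAX];
    const char *src = (argc > 0) ? argv[0] : "";
    strncpy(exe_path, src, sizeof(exe_path) - 1);
    exe_path[sizeof(exe_path) - 1] = '\0';  // guarantee NUL termination
    printf("exe_path %s\n", exe_path);
    return 0;
  }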


*** CID 1204294:  Resource leak  (RESOURCE_LEAK)
/osd/ReplicatedPG.cc: 980 in 
ReplicatedPG::do_pg_op(std::tr1::shared_ptr)()
974 // FIXME: EC not supported yet
975 result = -EOPNOTSUPP;
976 break;
977   }
978   if (is_unreadable_object(oid)) {
979 wait_for_unreadable_object(oid, op);
>>> CID 1204294:  Resource leak  (RESOURCE_LEAK)
>>> Variable "filter" going out of scope leaks the storage it points to.
980 return;
981   }
982   result = osd->store->read(coll, oid, 0, 0, osd_op.outdata);
983 }
984   }
985   break;


*** CID 1204295:  Uninitialized scalar field  (UNINIT_CTOR)
/osd/osd_types.h: 2716 in ObjectContext::RWState::RWState()()
2710 /// if set, restart backfill when we can get a read lock
2711 bool backfill_read_marker;
2712 
2713 /// if set, requeue snaptrim on lock release
2714 bool snaptrimmer_write_marker;
2715 
>>> CID 1204295:  Uninitialized scalar field  (UNINIT_CTOR)
>>> Non-static class member "snaptrimmer_write_marker" is not initialized 
>>> in this constructor nor in any functions that it calls.
2716 RWState() : state(RWNONE), count(0), backfill_read_marker(false) {}
2717 bool get_read(OpRequestRef op) {
2718   if (get_read_lock()) {
2719     return true;
2720   } // else
2721   waiters.push_back(op);


*** CID 1204296:  Uninitialized scalar field  (UNINIT_CTOR)
/osdc/Objecter.h: 1165 in Objecter::Op::Op(const object_t &, const 
object_locator_t &, std::vector> &, int, Context 
*, Context *, unsigned long *)()
1159     out_handler[i] = NULL;
1160     out_rval[i] = NULL;
1161   }
1162 
1163   if (target.base_oloc.key == o)
1164     target.base_oloc.key.clear();
>>> CID 1204296:  Uninitialized scalar field  (UNINIT_CTOR)
>>> Non-static class member "used_replica" is not initialized in this 
>>> constructor nor in any functions that it calls.
1165 }
1166 ~Op() {
1167   while (!out_handler.empty()) {
1168     delete out_handler.back();
1169     out_handler.pop_back();
1170   }
/osdc/Objecter.h: 1165 in Objecter::Op::Op(const object_t &, const 
object_locator_t &, std::vector> &, int, Context 
*, Context *, unsigned long *)()
1159     out_handler[i] = NULL;
1160     out_rval[i] = NULL;
1161   }
1162 
1163   if (target.base_oloc.key == o)
1164     target.base_oloc.key.clear();
>>> CID 1204296:  Uninitialized scalar field  (UNINIT_CTOR)
>>> Non-static class member "used_replica" is not initialized in this 
>>> constructor nor in any functions that it calls.
1165 }
1166 ~Op() {
1167   while (!out_handler.

Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-04-12 Thread Loic Dachary
I'll deal with the problems found in gf-complete: 
http://tracker.ceph.com/issues/8083

On 12/04/2014 06:06, Sage Weil wrote:
> Several new defects.  This wasn't running for the last couple of weeks 
> because the submodules didn't update on the test checkout.
> 

-- 
Loïc Dachary, Artisan Logiciel Libre





New Defects reported by Coverity Scan for ceph (fwd)

2014-04-11 Thread Sage Weil
Several new defects.  This wasn't running for the last couple of weeks 
because the submodules didn't update on the test checkout.

--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 20 of 41 defect(s)


** CID 1201374:  Unchecked return value  (CHECKED_RETURN)
/mon/MDSMonitor.cc: 1097 in MDSMonitor::prepare_command(MMonCommand *)()

** CID 1201375:  Unchecked return value  (CHECKED_RETURN)
/osd/OSD.cc: 4460 in OSD::do_command(Connection *, unsigned long, 
std::vector, 
std::allocator>, std::allocator, std::allocator>>> &, ceph::buffer::list &)()

** CID 1201376:  Unchecked return value  (CHECKED_RETURN)
/test/system/rados_list_parallel.cc: 333 in main()

** CID 1201377:  Unchecked return value  (CHECKED_RETURN)
/test/system/rados_list_parallel.cc: 335 in main()

** CID 1201378:  Unchecked return value  (CHECKED_RETURN)
/test/system/rados_list_parallel.cc: 330 in main()

** CID 1201379:  Copy-paste error  (COPY_PASTE_ERROR)
/mds/Server.cc: 7119 in Server::do_rename_rollback(ceph::buffer::list &, int, 
std::tr1::shared_ptr &, bool)()

** CID 1201380:  Logically dead code  (DEADCODE)
/erasure-code/jerasure/gf-complete/src/gf.c: 291 in gf_error_check()

** CID 1201381:  Logically dead code  (DEADCODE)
/erasure-code/jerasure/gf-complete/src/gf.c: 376 in gf_error_check()
/erasure-code/jerasure/gf-complete/src/gf.c: 377 in gf_error_check()

** CID 1201382:  Dereference after null check  (FORWARD_NULL)
/mds/Server.cc: 6073 in 
Server::_rename_prepare(std::tr1::shared_ptr &, EMetaBlob *, 
ceph::buffer::list *, CDentry *, CDentry *, CDentry *)()

** CID 1201383:  Dereference after null check  (FORWARD_NULL)
/mds/Server.cc: 7112 in Server::do_rename_rollback(ceph::buffer::list &, int, 
std::tr1::shared_ptr &, bool)()

** CID 1201384:  Dereference after null check  (FORWARD_NULL)
/mds/Server.cc: 7154 in 
Server::_rename_rollback_finish(std::tr1::shared_ptr &, 
std::tr1::shared_ptr &, CDentry *, unsigned long, CDentry *, 
CDentry *, bool)()

** CID 1201385:  Using invalid iterator  (INVALIDATE_ITERATOR)
/mds/Locker.cc: 416 in 
Locker::acquire_locks(std::tr1::shared_ptr &, 
std::set, std::allocator> 
&, std::set, std::allocator> &, std::set, 
std::allocator> &, std::map, std::allocator>> *, 
CInode *, bool)()
/mds/Locker.cc: 416 in 
Locker::acquire_locks(std::tr1::shared_ptr &, 
std::set, std::allocator> 
&, std::set, std::allocator> &, std::set, 
std::allocator> &, std::map, std::allocator>> *, 
CInode *, bool)()

** CID 1201386:  Using invalid iterator  (INVALIDATE_ITERATOR)
/test/librados/lock.cc: 371 in LibRadosLockECPP_BreakLockPP_Test::TestBody()()

** CID 1201387:  Using invalid iterator  (INVALIDATE_ITERATOR)
/test/librados/lock.cc: 330 in LibRadosLockECPP_ListLockersPP_Test::TestBody()()

** CID 1201388:  Missing unlock  (LOCK)
/mon/Monitor.cc: 462 in Monitor::preinit()()

** CID 1201389:  Out-of-bounds access  (OVERRUN)
/test/librbd/test_librbd.cc: 1825 in LibRBD_ZeroLengthDiscard_Test::TestBody()()

** CID 1201390:  Out-of-bounds access  (OVERRUN_DYNAMIC)
/test/librbd/test_librbd.cc: 1825 in LibRBD_ZeroLengthDiscard_Test::TestBody()()

** CID 1201391:  Out-of-bounds write  (OVERRUN_STATIC)
/erasure-code/jerasure/gf-complete/src/gf_w8.c: 1267 in gf_w8_table_init()

** CID 1201392:  Resource leak  (RESOURCE_LEAK)
/test/librados/TestCase.cc: 134 in RadosTestEC::cleanup_default_namespace(void 
*)()
/test/librados/TestCase.cc: 139 in RadosTestEC::cleanup_default_namespace(void 
*)()
/test/librados/TestCase.cc: 141 in RadosTestEC::cleanup_default_namespace(void 
*)()

** CID 1201393:  Resource leak  (RESOURCE_LEAK)
/test/librados/aio.cc: 1881 in LibRadosAioEC_RoundTripAppend_Test::TestBody()()
/test/librados/aio.cc: 1883 in LibRadosAioEC_RoundTripAppend_Test::TestBody()()



*** CID 1201374:  Unchecked return value  (CHECKED_RETURN)
/mon/MDSMonitor.cc: 1097 in MDSMonitor::prepare_command(MMonCommand *)()
1091   r = -EINVAL;
1092   poolid = -1;
1093   ss << "cannot remove default data pool";
1094 }
1095 
1096 if (poolid >= 0) {
>>> CID 1201374:  Unchecked return value  (CHECKED_RETURN)
>>> No check of the return value of "cmd_getval(g_ceph_context, cmdmap, 
>>> std::string("poolid", std::allocator()), poolid)".
1097   cmd_getval(g_ceph_context, cmdmap, "poolid", poolid);
1098   r = pending_mdsmap.remove_data_pool(poolid);
1099   if (r == -ENOENT)
1100     r = 0;
1101   if (r == 0)
1102     ss << "removed data pool " << poolid << " from mdsmap";
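The usual shape of a CHECKED_RETURN fix is to branch on the helper's result instead of discarding it. A toy sketch with a stand-in cmd_getval (not the real Ceph helper or the actual patch):

  #include <cerrno>
  #include <iostream>
  #include <map>
  #include <string>

  // stand-in for the real helper: report whether the key was parsed
  static bool cmd_getval(const std::map<std::string, long>& cmdmap,
                         const std::string& key, long& out) {
    std::map<std::string, long>::const_iterator it = cmdmap.find(key);
    if (it == cmdmap.end())
      return false;
    out = it->second;
    return true;
  }

  int main() {
    std::map<std::string, long> cmdmap;
    cmdmap["poolid"] = 3;
    long poolid = -1;
    if (!cmd_getval(cmdmap, "poolid", poolid)) {   // the result Coverity wants checked
      std::cerr << "unable to parse poolid" << std::endl;
      return EINVAL;
    }
    std::cout << "removing data pool " << poolid << " from mdsmap" << std::endl;
    return 0;
  }
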


*** CID 1201375:  Unchecked return value  (CHECKED_RETURN)
/osd/OSD.cc: 4460 in OSD::do_command(Connection *, unsigned long, 
std::vector, 
std::allocator>

Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-03-03 Thread Li Wang
Then it seems that Coverity is only able to perform intra-procedural checks; is there an inter-procedural analysis option to turn on?


On 2014/3/4 6:53, John Spray wrote:

On Mon, Mar 3, 2014 at 10:23 PM, Sage Weil  wrote:

** CID 1188299:  Data race condition  (MISSING_LOCK)
/mds/MDSUtility.cc: 142 in MDSUtility::handle_mds_map(MMDSMap *)()


Is there a trick to getting coverity to realise that the lock is held,
but by the calling function?  Does it recognise assertions that the
lock is held?

Cheers,
John


Re: New Defects reported by Coverity Scan for ceph (fwd)

2014-03-03 Thread John Spray
On Mon, Mar 3, 2014 at 10:23 PM, Sage Weil  wrote:
> ** CID 1188299:  Data race condition  (MISSING_LOCK)
> /mds/MDSUtility.cc: 142 in MDSUtility::handle_mds_map(MMDSMap *)()

Is there a trick to getting coverity to realise that the lock is held,
but by the calling function?  Does it recognise assertions that the
lock is held?

Cheers,
John
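Whether Coverity honours either workaround is exactly the open question here, but the two options being discussed look roughly like the toy sketch below: an assertion in the callee that the caller already holds the lock, and, assuming Coverity's annotation comments are enabled for the build, an inline coverity[] suppression. Names and types here are invented for illustration.

  #include <cassert>
  #include <mutex>

  struct Guarded {
    std::mutex m;
    bool locked_by_owner;        // toy stand-in for something like Mutex::is_locked()
    int pending;

    Guarded() : locked_by_owner(false), pending(0) {}

    // caller is expected to hold m before calling this
    void handle_event() {
      assert(locked_by_owner);   // (1) assert the "caller holds the lock" precondition
      // (2) or, if annotation parsing is enabled, an inline suppression:
      // coverity[missing_lock]
      pending = 0;
    }
  };

  int main() {
    Guarded g;
    g.m.lock();
    g.locked_by_owner = true;
    g.handle_event();
    g.locked_by_owner = false;
    g.m.unlock();
    return 0;
  }
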


New Defects reported by Coverity Scan for ceph (fwd)

2014-03-03 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 15 of 15 defect(s)


** CID 1188299:  Data race condition  (MISSING_LOCK)
/mds/MDSUtility.cc: 142 in MDSUtility::handle_mds_map(MMDSMap *)()

** CID 717359:  Uninitialized scalar field  (UNINIT_CTOR)
/rgw/rgw_common.cc: 161 in req_state::req_state(CephContext *, RGWEnv *)()

** CID 1188131:  Division or modulo by zero  (DIVIDE_BY_ZERO)
/common/histogram.h: 97 in pow2_hist_t::get_position_micro(int, unsigned long 
*, unsigned long *)()

** CID 716921:  Dereference after null check  (FORWARD_NULL)
/mds/MDCache.cc: 6950 in MDCache::handle_cache_expire(MCacheExpire *)()

** CID 1188134:  Unchecked dynamic_cast  (FORWARD_NULL)
/osd/ReplicatedBackend.cc: 428 in 
RPGTransaction::append(PGBackend::PGTransaction *)()

** CID 1188135:  Unchecked dynamic_cast  (FORWARD_NULL)
/osd/ReplicatedBackend.cc: 494 in ReplicatedBackend::submit_transaction(const 
hobject_t &, const eversion_t &, PGBackend::PGTransaction *, const eversion_t 
&, std::vector> &, Context *, 
Context *, Context *, unsigned long, osd_reqid_t, 
std::tr1::shared_ptr)()

** CID 716990:  Dereference null return value  (NULL_RETURNS)
/mds/MDCache.cc: 10098 in MDCache::handle_discover(MDiscover *)()

** CID 1135931:  Resource leak  (RESOURCE_LEAK)
/os/FileStore.cc: 1739 in FileStore::queue_transactions(ObjectStore::Sequencer 
*, std::list> &, std::tr1::shared_ptr, 
ThreadPool::TPHandle *)()

** CID 1135933:  Resource leak  (RESOURCE_LEAK)
/os/FileStore.cc: 1739 in FileStore::queue_transactions(ObjectStore::Sequencer 
*, std::list> &, std::tr1::shared_ptr, 
ThreadPool::TPHandle *)()

** CID 1188126:  Unchecked return value  (CHECKED_RETURN)
/test/objectstore/store_test.cc: 564 in SyntheticWorkloadState::stat()()

** CID 1188145:  Resource leak  (RESOURCE_LEAK)
/osd/ReplicatedPG.cc: 5231 in 
ReplicatedPG::fill_in_copy_get(ReplicatedPG::OpContext *, 
ceph::buffer::list::iterator &, OSDOp &, std::tr1::shared_ptr &, 
bool)()

** CID 1188156:  Resource leak  (RESOURCE_LEAK)
/test/librados/c_write_operations.cc: 131 in 
LibRadosCWriteOps_Exec_Test::TestBody()()
/test/librados/c_write_operations.cc: 134 in 
LibRadosCWriteOps_Exec_Test::TestBody()()
/test/librados/c_write_operations.cc: 136 in 
LibRadosCWriteOps_Exec_Test::TestBody()()
/test/librados/c_write_operations.cc: 138 in 
LibRadosCWriteOps_Exec_Test::TestBody()()
/test/librados/c_write_operations.cc: 139 in 
LibRadosCWriteOps_Exec_Test::TestBody()()

** CID 1160848:  Uninitialized scalar variable  (UNINIT)
/osdc/Objecter.cc: 1519 in Objecter::recalc_op_target(Objecter::Op *)()

** CID 1030132:  Uninitialized scalar variable  (UNINIT)
/mon/PGMonitor.cc: 1979 in 
PGMonitor::dump_stuck_pg_stats(std::basic_stringstream, std::allocator> &, ceph::Formatter *, int, 
std::vector, 
std::allocator>, std::allocator, std::allocator>>> &) const()
/mon/PGMonitor.cc: 1979 in 
PGMonitor::dump_stuck_pg_stats(std::basic_stringstream, std::allocator> &, ceph::Formatter *, int, 
std::vector, 
std::allocator>, std::allocator, std::allocator>>> &) const()

** CID 1135932:  Resource leak  (RESOURCE_LEAK)
/os/FileStore.cc: 1739 in FileStore::queue_transactions(ObjectStore::Sequencer 
*, std::list> &, std::tr1::shared_ptr, 
ThreadPool::TPHandle *)()



*** CID 1188299:  Data race condition  (MISSING_LOCK)
/mds/MDSUtility.cc: 142 in MDSUtility::handle_mds_map(MMDSMap *)()
136 
137 void MDSUtility::handle_mds_map(MMDSMap* m)
138 {
139   mdsmap->decode(m->get_encoded());
140   if (waiting_for_mds_map) {
141 waiting_for_mds_map->complete(0);
>>> CID 1188299:  Data race condition  (MISSING_LOCK)
>>> Accessing "this->waiting_for_mds_map" 
>>> ("MDSUtility.waiting_for_mds_map") requires the "Mutex._m" lock.
142 waiting_for_mds_map = NULL;
143   }
144 }
145 
146 
147 bool MDSUtility::ms_get_authorizer(int dest_type, AuthAuthorizer 
**authorizer,


*** CID 717359:  Uninitialized scalar field  (UNINIT_CTOR)
/rgw/rgw_common.cc: 161 in req_state::req_state(CephContext *, RGWEnv *)()
155   length = NULL;
156   copy_source = NULL;
157   http_auth = NULL;
158   local_source = false;
159 
160   obj_ctx = NULL;
>>> CID 717359:  Uninitialized scalar field  (UNINIT_CTOR)
>>> Non-static class member "bucket_exists" is not initialized in this 
>>> constructor nor in any functions that it calls.
161 }
162 
163 req_state::~req_state() {
164   delete formatter;
165   delete bucket_acl;
166   delete object_acl;


*** CID 1188131:  Division or modulo by zero  (DIVIDE

New Defects reported by Coverity Scan for ceph (fwd)

2013-12-17 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 7 of 13 defect(s)


** CID 1138583:  Dereference null return value  (NULL_RETURNS)
/mds/MDCache.cc: 5074 in MDCache::process_imported_caps()()

** CID 1138584:  Dereference null return value  (NULL_RETURNS)
/mds/Migrator.cc: 2480 in Migrator::import_finish(CDir *, bool, bool)()

** CID 1138585:  Dereference null return value  (NULL_RETURNS)
/mds/Migrator.cc: 2289 in Migrator::import_reverse(CDir *)()

** CID 1138586:  Dereference null return value  (NULL_RETURNS)
/mds/Migrator.cc: 2297 in Migrator::import_reverse(CDir *)()

** CID 1138587:  Out-of-bounds access  (OVERRUN)
/messages/MClientCaps.h: 170 in MClientCaps::decode_payload()()
/messages/MClientCaps.h: 170 in MClientCaps::decode_payload()()
/messages/MClientCaps.h: 170 in MClientCaps::decode_payload()()

** CID 1138588:  Out-of-bounds access  (OVERRUN)
/messages/MClientCaps.h: 179 in MClientCaps::encode_payload(unsigned long)()
/messages/MClientCaps.h: 179 in MClientCaps::encode_payload(unsigned long)()
/messages/MClientCaps.h: 179 in MClientCaps::encode_payload(unsigned long)()

** CID 739602:  Dereference null return value  (NULL_RETURNS)
/mds/Server.cc: 707 in Server::reconnect_tick()()








--- End Message ---


Re: New Defects reported by Coverity Scan for ceph (fwd)

2013-12-17 Thread Ilya Dryomov
On Mon, Dec 16, 2013 at 6:07 PM, Sage Weil  wrote:
>
>
> -- Forwarded message --
> From: scan-ad...@coverity.com
> To: undisclosed-recipients:;
> Cc:
> Date: Mon, 16 Dec 2013 00:57:57 -0800
> Subject: New Defects reported by Coverity Scan for ceph
>
>
> Hi,
>
>
> Please find the latest report on new defect(s) introduced to ceph found with 
> Coverity Scan.
>
> Defect(s) Reported-by: Coverity Scan
> Showing 4 of 4 defect(s)
>
>
> ** CID 1138366:  Resource leak  (RESOURCE_LEAK)
> /test/librados/tier.cc: 96 in LibRadosMisc_HitSetNone_Test::TestBody()()
> /test/librados/tier.cc: 98 in LibRadosMisc_HitSetNone_Test::TestBody()()
> /test/librados/tier.cc: 99 in LibRadosMisc_HitSetNone_Test::TestBody()()
>
> ** CID 1138367:  Time of check time of use  (TOCTOU)
> /rbd.cc: 2024 in do_kernel_rm(const char *)()
>
> ** CID 1138368:  Time of check time of use  (TOCTOU)
> /rbd.cc: 1735 in do_kernel_add(const char *, const char *, const char *)()

Looks like these two refer to my recent work on rbd:

2019   const char *fname = "/sys/bus/rbd/remove_single_major";
2020   if (stat(fname, &sbuf)) {
2021 fname = "/sys/bus/rbd/remove";
2022   }
2023
2024   int fd = open(fname, O_WRONLY); <---
2025   if (fd < 0) {

This is not a real TOCTOU, as there is no race here, so technically a
suppression annotation would be in order, but I'll redo it as two open()s instead.

Thanks,

Ilya
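A sketch of the two-open() approach described above (illustrative only, not necessarily the exact patch): try the new sysfs node first and fall back on ENOENT, with no stat() check left to race against.

  #include <cerrno>
  #include <cstdio>
  #include <fcntl.h>
  #include <unistd.h>

  // try the new sysfs node first, then fall back to the old one; since there
  // is no separate stat() check, nothing can change between check and use
  static int open_rbd_remove_node() {
    int fd = open("/sys/bus/rbd/remove_single_major", O_WRONLY);
    if (fd < 0 && errno == ENOENT)
      fd = open("/sys/bus/rbd/remove", O_WRONLY);
    return fd;   // caller close()s it, or reports -errno if still negative
  }

  int main() {
    int fd = open_rbd_remove_node();
    if (fd >= 0)
      close(fd);
    std::printf("open %s\n", fd >= 0 ? "succeeded" : "failed");
    return 0;
  }
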


New Defects reported by Coverity Scan for ceph (fwd)

2013-12-16 Thread Sage Weil
--- Begin Message ---


Hi,


Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 4 of 4 defect(s)


** CID 1138366:  Resource leak  (RESOURCE_LEAK)
/test/librados/tier.cc: 96 in LibRadosMisc_HitSetNone_Test::TestBody()()
/test/librados/tier.cc: 98 in LibRadosMisc_HitSetNone_Test::TestBody()()
/test/librados/tier.cc: 99 in LibRadosMisc_HitSetNone_Test::TestBody()()

** CID 1138367:  Time of check time of use  (TOCTOU)
/rbd.cc: 2024 in do_kernel_rm(const char *)()

** CID 1138368:  Time of check time of use  (TOCTOU)
/rbd.cc: 1735 in do_kernel_add(const char *, const char *, const char *)()

** CID 1138369:  Uncaught exception  (UNCAUGHT_EXCEPT)
/test/librados/tier.cc: 369 in main()
/test/librados/tier.cc: 369 in main()








--- End Message ---


New Defects reported by Coverity Scan for ceph (fwd)

2013-08-20 Thread Sage Weil
Coverity picked up some issues with the filestore code.  These are mostly 
old issues that appear new because code moved around, but this is probably 
a good opportunity to fix them... :)

sage
--- Begin Message ---


Hi,

Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan

Defect(s) Reported-by: Coverity Scan
Showing 7 of 9 defects

** CID 1063704: Uninitialized scalar field (UNINIT_CTOR)
/os/BtrfsFileStoreBackend.cc: 57

** CID 1063703: Time of check time of use (TOCTOU)
/os/GenericFileStoreBackend.cc: 170

** CID 1063702: Time of check time of use (TOCTOU)
/os/BtrfsFileStoreBackend.cc: 246

** CID 1063701: Copy into fixed size buffer (STRING_OVERFLOW)
/os/BtrfsFileStoreBackend.cc: 458

** CID 1063700: Copy into fixed size buffer (STRING_OVERFLOW)
/os/BtrfsFileStoreBackend.cc: 370

** CID 1063699: Resource leak (RESOURCE_LEAK)
/os/BtrfsFileStoreBackend.cc: 345

** CID 1063698: Improper use of negative value (NEGATIVE_RETURNS)



CID 1063704: Uninitialized scalar field (UNINIT_CTOR)

/os/BtrfsFileStoreBackend.h: 25 ( member_decl)
   22private:
   23  bool has_clone_range;   ///< clone range ioctl is supported
   24  bool has_snap_create;   ///< snap create ioctl is supported
>>> Class member declaration for "has_snap_destroy".
   25  bool has_snap_destroy;  ///< snap destroy ioctl is supported
   26  bool has_snap_create_v2;///< snap create v2 ioctl (async!) is 
supported
   27  bool has_wait_sync; ///< wait sync ioctl is supported
   28  bool stable_commits;
   29  bool m_filestore_btrfs_clone_range;
  

/os/BtrfsFileStoreBackend.cc: 57 ( uninit_member)
   54GenericFileStoreBackend(fs), has_clone_range(false), 
has_snap_create(false),
   55has_snap_create_v2(false), has_wait_sync(false), 
stable_commits(false),
   56m_filestore_btrfs_clone_range(g_conf->filestore_btrfs_clone_range),
>>> CID 1063704: Uninitialized scalar field (UNINIT_CTOR)
>>> Non-static class member "has_snap_destroy" is not initialized in this 
>>> constructor nor in any functions that it calls.
   57m_filestore_btrfs_snap (g_conf->filestore_btrfs_snap) { }
   58
   59int BtrfsFileStoreBackend::detect_features()
   60{
   61  int r;
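Based only on the snippet above, the likely shape of the fix is to add has_snap_destroy to the initializer list alongside the other feature flags; a reduced, compilable sketch (not the actual commit):

  struct BtrfsFeatureFlags {
    bool has_clone_range, has_snap_create, has_snap_destroy,
         has_snap_create_v2, has_wait_sync, stable_commits;
    BtrfsFeatureFlags()
      : has_clone_range(false), has_snap_create(false),
        has_snap_destroy(false),        // the member the report says was missing
        has_snap_create_v2(false), has_wait_sync(false), stable_commits(false) {}
  };

  int main() {
    BtrfsFeatureFlags f;
    return f.has_snap_destroy ? 1 : 0;   // deterministic only with the initializer
  }
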
  

CID 1063703: Time of check time of use (TOCTOU)

/os/GenericFileStoreBackend.cc: 170 ( fs_check_call)
   167int GenericFileStoreBackend::create_current()
   168{
   169  struct stat st;
>>> CID 1063703: Time of check time of use (TOCTOU)
>>> Calling function "stat(char const *, stat *)" to perform check on 
>>> "this->get_current_path()->c_str()".
   170  int ret = ::stat(get_current_path().c_str(), &st);
   171  if (ret == 0) {
   172// current/ exists
   173if (!S_ISDIR(st.st_mode)) {
   174  dout(0) << "_create_current: current/ exists but is not a 
directory" << dendl;
  

/os/GenericFileStoreBackend.cc: 178 ( toctou)
   175  ret = -EINVAL;
   176}
   177  } else {
>>> Calling function "mkdir(char const *, __mode_t)" that uses 
>>> "this->get_current_path()->c_str()" after a check function. This can cause 
>>> a time-of-check, time-of-use race condition.
   178ret = ::mkdir(get_current_path().c_str(), 0755);
   179if (ret < 0) {
   180  ret = -errno;
   181  dout(0) << "_create_current: mkdir " << get_current_path() << " 
failed: "<< cpp_strerror(ret) << dendl;
   182}
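One common way to avoid this stat()-then-mkdir() pattern is to attempt the mkdir first and only consult stat() when the directory already exists; a hedged, generic sketch (not the actual FileStore change):

  #include <cerrno>
  #include <sys/stat.h>
  #include <sys/types.h>

  // create the directory first and only fall back to stat() when it already
  // exists, so no decision is made on state that can change in between
  static int create_dir_idempotent(const char* path) {
    if (::mkdir(path, 0755) == 0)
      return 0;                 // freshly created
    if (errno != EEXIST)
      return -errno;            // real failure
    struct stat st;
    if (::stat(path, &st) < 0)
      return -errno;
    return S_ISDIR(st.st_mode) ? 0 : -ENOTDIR;
  }

  int main() {
    return create_dir_idempotent("/tmp/current_test_dir") < 0 ? 1 : 0;
  }
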
  

CID 1063702: Time of check time of use (TOCTOU)

/os/BtrfsFileStoreBackend.cc: 246 ( fs_check_call)
   243int BtrfsFileStoreBackend::create_current()
   244{
   245  struct stat st;
>>> CID 1063702: Time of check time of use (TOCTOU)
>>> Calling function "stat(char const *, stat *)" to perform check on 
>>> "this->get_current_path()->c_str()".
   246  int ret = ::stat(get_current_path().c_str(), &st);
   247  if (ret == 0) {
   248// current/ exists
   249if (!S_ISDIR(st.st_mode)) {
   250  dout(0) << "create_current: current/ exists but is not a 
directory" << dendl;
  

/os/BtrfsFileStoreBackend.cc: 288 ( toctou)
   285  }
   286
   287  dout(2) << "create_current: created btrfs subvol " << 
get_current_path() << dendl;
>>> Calling function "chmod(char const *, __mode_t)" that uses 
>>> "this->get_current_path()->c_str()" after a check function. This can cause 
>>> a time-of-check, time-of-use race condition.
   288  if (::chmod(get_current_path().c_str(), 0755) < 0) {
   289ret = -errno;
   290dout(0) << "create_current: failed to chmod " << 
get_current_path() << " to 0755: "
   291  << cpp_strerror(ret) << dendl;
   292return ret;
  
___

New Defects reported by Coverity Scan for ceph (fwd)

2013-07-25 Thread Sage Weil
--- Begin Message ---


Hi,

Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan

Defect(s) Reported-by: Coverity Scan


** CID 1057291: Resource leak (RESOURCE_LEAK)
/rgw/rgw_rados.cc: 2643



CID 1057291: Resource leak (RESOURCE_LEAK)

/rgw/rgw_rados.cc: 2641 ( alloc_arg)
   2638
   2639RGWRESTStreamWriteRequest *out_stream_req;
   2640
>>> "RGWRESTConn::put_obj_init(std::string const &, rgw_obj &, uint64_t, 
>>> std::map, 
>>> std::allocator >, ceph::buffer::list, 
>>> std::less, 
>>> std::allocator > >, std::allocator>> std::char_traits, std::allocator > const, ceph::buffer::list> > 
>>> > &, RGWRESTStreamWriteRequest **)" allocates memory that is stored into 
>>> "out_stream_req".
   2641int ret = rest_master_conn->put_obj_init(user_id, dest_obj, 
astate->size, src_attrs, &out_stream_req);
   2642if (ret < 0)
   2643  return ret;
   2644
   2645ret = get_obj_iterate(ctx, &handle, src_obj, 0, astate->size - 
1, out_stream_req->get_out_cb());
  

/rgw/rgw_rados.cc: 2643 ( leaked_storage)
   2640
   2641int ret = rest_master_conn->put_obj_init(user_id, dest_obj, 
astate->size, src_attrs, &out_stream_req);
   2642if (ret < 0)
>>> CID 1057291: Resource leak (RESOURCE_LEAK)
>>> Variable "out_stream_req" going out of scope leaks the storage it points to.
   2643  return ret;
   2644
   2645ret = get_obj_iterate(ctx, &handle, src_obj, 0, astate->size - 
1, out_stream_req->get_out_cb());
   2646if (ret < 0)
   2647  return ret;
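The leak here is the classic allocate-then-early-return pattern; one generic remedy is to hand the allocation to a smart pointer as soon as it exists. A toy sketch with stand-in types (the real RGWRESTStreamWriteRequest API is not reproduced here):

  #include <memory>

  struct StreamReq { };   // stand-in for the allocated request object

  static int put_obj_init_stub(StreamReq** out) { *out = new StreamReq; return 0; }
  static int next_step_that_can_fail(StreamReq*) { return -1; }

  static int copy_obj_sketch() {
    StreamReq* raw = nullptr;
    int ret = put_obj_init_stub(&raw);
    if (ret < 0)
      return ret;
    std::unique_ptr<StreamReq> req(raw);   // owns the allocation from here on
    ret = next_step_that_can_fail(req.get());
    if (ret < 0)
      return ret;                          // no leak: req cleans up on every return
    return 0;
  }

  int main() {
    return copy_obj_sketch() < 0 ? 1 : 0;
  }
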
  


--- End Message ---


New Defects reported by Coverity Scan for ceph (fwd)

2013-07-19 Thread Sage Weil
Several new rgw issues from the recent merge...
--- Begin Message ---


Hi,

Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan

Defect(s) Reported-by: Coverity Scan
Showing 7 of 61 defects

** CID 1049252: Wrapper object use after free (WRAPPER_ESCAPE)
/rgw/rgw_rest_replica_log.h: 79

** CID 1049251: Wrapper object use after free (WRAPPER_ESCAPE)
/rgw/rgw_rest_replica_log.h: 59

** CID 1049250: Wrapper object use after free (WRAPPER_ESCAPE)
/rgw/rgw_rest_replica_log.h: 39

** CID 1049249: Missing varargs init or cleanup (VARARGS)
/test/librbd/test_librbd.cc: 315

** CID 1049248: Use after free (USE_AFTER_FREE)
/test/test_rgw_admin_log.cc: 782

** CID 1049247: Use after free (USE_AFTER_FREE)
/test/cls_version/test_cls_version.cc: 79

** CID 1049246: Use after free (USE_AFTER_FREE)
/rgw/rgw_rest_s3.cc: 415



CID 1049252: Wrapper object use after free (WRAPPER_ESCAPE)

/rgw/rgw_rest_replica_log.h: 79 ( escape)
   76string s = "replica";
   77s.append(obj_type);
   78s.append("_deletebound");
>>> CID 1049252: Wrapper object use after free (WRAPPER_ESCAPE)
>>> The internal representation of "s" escapes, but is destroyed when it exits 
>>> scope.
   79return s.c_str();
   80  }
   81};
   82
   83class RGWOp_BILog_GetBounds : public RGWRESTOp {
  

CID 1049251: Wrapper object use after free (WRAPPER_ESCAPE)

/rgw/rgw_rest_replica_log.h: 59 ( escape)
   56string s = "replica";
   57s.append(obj_type);
   58s.append("_updatebounds");
>>> CID 1049251: Wrapper object use after free (WRAPPER_ESCAPE)
>>> The internal representation of "s" escapes, but is destroyed when it exits 
>>> scope.
   59return s.c_str();
   60  }
   61};
   62
   63class RGWOp_OBJLog_DeleteBounds : public RGWRESTOp {
  

CID 1049250: Wrapper object use after free (WRAPPER_ESCAPE)

/rgw/rgw_rest_replica_log.h: 39 ( escape)
   36string s = "replica";
   37s.append(obj_type);
   38s.append("_getbounds");
>>> CID 1049250: Wrapper object use after free (WRAPPER_ESCAPE)
>>> The internal representation of "s" escapes, but is destroyed when it exits 
>>> scope.
   39return s.c_str();
   40  }
   41};
   42
   43class RGWOp_OBJLog_SetBounds : public RGWRESTOp {
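All three WRAPPER_ESCAPE hits share the same shape: returning s.c_str() hands out a pointer into a local std::string that is destroyed on return. A minimal sketch of the usual fix, returning the string by value (illustrative, not the actual rgw patch):

  #include <iostream>
  #include <string>

  // return the std::string itself; callers keep a valid object instead of a
  // pointer into a local buffer that is destroyed on return
  static std::string bounds_resource_name(const std::string& obj_type) {
    std::string s = "replica";
    s += obj_type;
    s += "_getbounds";
    return s;
  }

  int main() {
    std::cout << bounds_resource_name("data") << std::endl;
    return 0;
  }
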
  

CID 1049249: Missing varargs init or cleanup (VARARGS)

/test/librbd/test_librbd.cc: 305 ( va_init)
   302cout << "image: " << names[i] << endl;
   303  }
   304
>>> Initializing va_list "ap".
   305  va_start(ap, num_expected);
   306  for (i = num_expected; i > 0; i--) {
   307char *expected = va_arg(ap, char *);
   308cout << "expected = " << expected << endl;
   309vector::iterator listed_name = find(names.begin(), 
names.end(), string(expected));
  

/test/librbd/test_librbd.cc: 315 ( missing_va_end)
   312  }
   313  assert(names.empty());
   314
>>> CID 1049249: Missing varargs init or cleanup (VARARGS)
>>> va_end was not called for "ap".
   315  return num;
   316}
   317
   318TEST(LibRBD, TestCreateLsDeletePP)
   319{
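The VARARGS report is about a va_start() without a matching va_end() on the return path; a minimal stand-alone sketch of the corrected pattern (not the librbd test itself):

  #include <cstdarg>
  #include <cstdio>

  static int sum_ints(int count, ...) {
    va_list ap;
    va_start(ap, count);
    int total = 0;
    for (int i = 0; i < count; ++i)
      total += va_arg(ap, int);
    va_end(ap);          // the cleanup call the report says is missing
    return total;
  }

  int main() {
    std::printf("%d\n", sum_ints(3, 1, 2, 3));
    return 0;
  }
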
  

CID 1049248: Use after free (USE_AFTER_FREE)

/test/test_rgw_admin_log.cc: 750 ( freed_arg)
   747  char *bucket_obj = (char *)malloc(TEST_BUCKET_OBJECT_SIZE);
   748  ASSERT_TRUE(bucket_obj != NULL);
   749  EXPECT_EQ(put_bucket_obj(TEST_BUCKET_OBJECT, bucket_obj, 
TEST_BUCKET_OBJECT_SIZE), 0);
>>> "free(void *)" frees "bucket_obj".
   750  free(bucket_obj);
   751  sleep(1); 
   752  ss << "/admin/log?type=data&id=" << shard_id << "&start-time=" << 
start_time;
   753  rest_req = ss.str();
   754  g_test->send_request(string("GET"), rest_req);
  

/test/test_rgw_admin_log.cc: 782 ( pass_freed_arg)
   779  }
   780
   781  sleep(1);
>>> CID 1049248: Use after free (USE_AFTER_FREE)
>>> Passing freed pointer "bucket_obj" as an argument to function 
>>> "put_bucket_obj(char const *, char *, unsigned int)".
   782  EXPECT_EQ(put_bucket_obj(TEST_BUCKET_OBJECT, bucket_obj, 
TEST_BUCKET_OBJECT_SIZE), 0);
   783  sleep(20);
   784  ss.str("");
   785  ss << "/admin/log?type=data&id=" << shard_id << "&start-time=" << 
start_time;
   786  rest_req = ss.str();
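The USE_AFTER_FREE fix is simply to keep the buffer alive until its last use (or allocate a fresh one for the second call); a toy sketch with a stand-in for put_bucket_obj():

  #include <cstdlib>
  #include <cstring>

  static void put_bucket_obj_stub(char* buf, size_t len) {
    std::memset(buf, 'x', len);   // stand-in for the real upload helper
  }

  int main() {
    const size_t size = 1024;
    char* bucket_obj = static_cast<char*>(std::malloc(size));
    if (!bucket_obj)
      return 1;
    put_bucket_obj_stub(bucket_obj, size);   // first use
    put_bucket_obj_stub(bucket_obj, size);   // later use: buffer still valid
    std::free(bucket_obj);                   // freed only after the last use
    return 0;
  }
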
  

CID 1049247: Use after free (USE_AFTER_FREE)

/test/cls_version/test_cls_version.cc: 68 ( freed_arg)
   65  ASSERT_GT((long long)ver2.ver, (long long)ver.ver);
   66  ASSERT_EQ(0, (int)ver2.tag.compare(ver.tag));
   67
>>> "operator delete(void *)

Re: New Defects reported by Coverity Scan for ceph (fwd)

2013-06-19 Thread Loic Dachary
Hi Sage,

I believe
https://github.com/ceph/ceph/pull/366
fixes the issue. It was introduced by
https://github.com/ceph/ceph/commit/b16fdf23e5325623874ee1a02e134f577c5a7dcd#L0R364
which was merged in master yesterday.

Cheers

On 06/19/2013 09:36 PM, Sage Weil wrote:
> Dereferencing iterator "p" though it is already past the end of its container.

-- 
Loïc Dachary, Artisan Logiciel Libre
All that is necessary for the triumph of evil is that good people do nothing.





New Defects reported by Coverity Scan for ceph (fwd)

2013-06-19 Thread Sage Weil
Hi Loic,

Do you mind looking at the PGLog.cc iterator issue coverity picked up on?

sage
--- Begin Message ---


Hi,

Please find the latest report on new defect(s) introduced to ceph found with 
Coverity Scan

Defect(s) Reported-by: Coverity Scan


** CID 1035577: Resource leak (RESOURCE_LEAK)
/test/libcephfs/multiclient.cc: 67

** CID 1035576: Resource leak (RESOURCE_LEAK)
/test/libcephfs/multiclient.cc: 32

** CID 1035575: Resource leak (RESOURCE_LEAK)
/test/libcephfs/multiclient.cc: 27

** CID 1035574: Using invalid iterator (INVALIDATE_ITERATOR)
/osd/PGLog.cc: 369



CID 1035577: Resource leak (RESOURCE_LEAK)

/test/libcephfs/multiclient.cc: 67 ( alloc_arg)
   64
   65TEST(LibCephFS, MulticlientHoleEOF) {
   66  struct ceph_mount_info *ca, *cb;
>>> "ceph_create(ceph_mount_info **, char const *)" allocates memory that is 
>>> stored into "ca".
   67  ASSERT_EQ(ceph_create(&ca, NULL), 0);
   68  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
   69  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
   70  ASSERT_EQ(ceph_mount(ca, NULL), 0);
   71
  

/test/libcephfs/multiclient.cc: 67 ( leaked_storage)
   64
   65TEST(LibCephFS, MulticlientHoleEOF) {
   66  struct ceph_mount_info *ca, *cb;
>>> CID 1035577: Resource leak (RESOURCE_LEAK)
>>> Variable "ca" going out of scope leaks the storage it points to.
   67  ASSERT_EQ(ceph_create(&ca, NULL), 0);
   68  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
   69  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
   70  ASSERT_EQ(ceph_mount(ca, NULL), 0);
   71
  

CID 1035576: Resource leak (RESOURCE_LEAK)

/test/libcephfs/multiclient.cc: 32 ( alloc_arg)
   29  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
   30  ASSERT_EQ(ceph_mount(ca, NULL), 0);
   31
>>> "ceph_create(ceph_mount_info **, char const *)" allocates memory that is 
>>> stored into "cb".
   32  ASSERT_EQ(ceph_create(&cb, NULL), 0);
   33  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
   34  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
   35  ASSERT_EQ(ceph_mount(cb, NULL), 0);
   36
  

/test/libcephfs/multiclient.cc: 32 ( leaked_storage)
   29  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
   30  ASSERT_EQ(ceph_mount(ca, NULL), 0);
   31
>>> CID 1035576: Resource leak (RESOURCE_LEAK)
>>> Variable "cb" going out of scope leaks the storage it points to.
   32  ASSERT_EQ(ceph_create(&cb, NULL), 0);
   33  ASSERT_EQ(0, ceph_conf_parse_env(cb, NULL));
   34  ASSERT_EQ(ceph_conf_read_file(cb, NULL), 0);
   35  ASSERT_EQ(ceph_mount(cb, NULL), 0);
   36
  

CID 1035575: Resource leak (RESOURCE_LEAK)

/test/libcephfs/multiclient.cc: 27 ( alloc_arg)
   24
   25TEST(LibCephFS, MulticlientSimple) {
   26  struct ceph_mount_info *ca, *cb;
>>> "ceph_create(ceph_mount_info **, char const *)" allocates memory that is 
>>> stored into "ca".
   27  ASSERT_EQ(ceph_create(&ca, NULL), 0);
   28  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
   29  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
   30  ASSERT_EQ(ceph_mount(ca, NULL), 0);
   31
  

/test/libcephfs/multiclient.cc: 27 ( leaked_storage)
   24
   25TEST(LibCephFS, MulticlientSimple) {
   26  struct ceph_mount_info *ca, *cb;
>>> CID 1035575: Resource leak (RESOURCE_LEAK)
>>> Variable "ca" going out of scope leaks the storage it points to.
   27  ASSERT_EQ(ceph_create(&ca, NULL), 0);
   28  ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
   29  ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
   30  ASSERT_EQ(ceph_mount(ca, NULL), 0);
   31
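These multiclient leaks all follow from the test returning without tearing down its mounts; a hedged sketch of the shape of the fix, assuming ceph_unmount()/ceph_release() as the teardown pair and the in-tree include path (not the actual patch):

  #include "include/cephfs/libcephfs.h"
  #include "gtest/gtest.h"

  TEST(LibCephFS, MulticlientSimpleNoLeak) {
    struct ceph_mount_info *ca;
    ASSERT_EQ(ceph_create(&ca, NULL), 0);
    ASSERT_EQ(0, ceph_conf_parse_env(ca, NULL));
    ASSERT_EQ(ceph_conf_read_file(ca, NULL), 0);
    ASSERT_EQ(ceph_mount(ca, NULL), 0);

    // ... test body as above ...

    ASSERT_EQ(0, ceph_unmount(ca));   // tear down so "ca" no longer leaks
    ASSERT_EQ(0, ceph_release(ca));
  }
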
  

CID 1035574: Using invalid iterator (INVALIDATE_ITERATOR)

/osd/PGLog.cc: 361 ( past_the_end)
   358  dout(10) << "rewind_divergent_log truncate divergent future " << 
newhead << dendl;
   359  assert(newhead > log.tail);
   360
>>> Function "end" creates an iterator.
   361  list::iterator p = log.log.end();
   362  list divergent;
   363  while (true) {
   364if (p == log.log.begin()) {
   365  // yikes, the whole thing is divergent!
  

/osd/PGLog.cc: 361 ( assign_var)
   358  dout(10) << "rewind_divergent_log truncate divergent future " << 
newhead << dendl;
   359  assert(newhead > log.tail);
   360
>>> Assigning: "p" = "this->log.log.end()".
   361  list::iterator p = log.log.end();
   362  list divergent;
   363  while (true) {
   364if (p == log.log.begin()) {
   365  // yikes, the whole thing is divergent!
  

/osd/PGLog.cc: 369 ( deref_iterator)
   366  divergent.swap(log.log);
   367  break;
   368}
>>> CID 1035574: Using invalid iterator (INVALIDATE_I