On Monday 01 October 2001 04:59 pm, Calin A. Culianu wrote:
> Hmm.. just playing around with pointers should not take that long
> (unless you are like on a 16 MHz 386sx or something).
>
> Here's one possible explanation that doesn't really account for the 5ms
> run-time of each thread, but does account for the observed phenomenon that
> 1 thread running 1 task will display significantly lower run times than 2
> threads that share a mutex running the same task:
>
> 1) Thread A acquires lock
> 2) Thread B wants lock, but can't get it so sleeps
> 3) Thread A does work that takes time T
> 4) Thread A releases lock
> 5) Thread B is immediately awoken because it was next-in-line for the
> mutex
> 6) Thread B does work that takes roughly time T as well
> 7) Thread B releases lock
> 8) goto (1)
>
> As you can see, thread B did work that took time T, but ended up spending
> over 2T's worth of time to do it.
>
> Now this is a sort of obvious example.. but could it be enough to explain
> your really long run times?  Maybe if you posted some code I might be able
> to help better?  Thanks...
>
>
> -Calin
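
For concreteness, here is the scenario above as a standalone sketch: plain
user-space POSIX threads with gettimeofday() rather than RTLinux primitives,
and busy_work() as a hypothetical stand-in for the real task. Under
contention each thread can spend close to 2T per iteration even though its
own work only takes T:

/* Hypothetical illustration of the lock-convoy scenario quoted above.
 * Build with: gcc convoy.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <sys/time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void busy_work(void)            /* stand-in work that takes some time T */
{
    volatile long i;
    for (i = 0; i < 1000000; i++)
        ;
}

static double now_us(void)             /* wall-clock time in microseconds */
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1e6 + tv.tv_usec;
}

static void *worker(void *name)
{
    int iter;
    for (iter = 0; iter < 5; iter++) {
        double t0 = now_us();
        pthread_mutex_lock(&lock);     /* may sleep ~T while the peer holds it */
        busy_work();                   /* the work itself takes ~T */
        pthread_mutex_unlock(&lock);
        printf("%s: %.0f us\n", (char *)name, now_us() - t0);
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, worker, "A");
    pthread_create(&b, NULL, worker, "B");
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}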

Thanks a lot for your help, here is the code:
-----------------------------------------------------
Hardware: dual-processor Pentium III, 1 GHz, 512 MB RAM
-----------------------------------------------------
Software:
 - Red Hat 7.1
 - Kernel 2.4.4 from kernel.org
 - RT-Linux 3.1
-----------------------------------------------------
pthread_attr_t     attr1,attr2;
struct sched_param sched_param1,sched_param2;
pthread_t          pthread_test1;
pthread_t          pthread_test2;
int                counter1  = 0;
int                duration1 = 0;    // accumulated overrun time in ns (int may overflow on long runs)
int                cpu1      = 100;
int                counter2  = 0;
int                duration2 = 0;    // accumulated overrun time in ns (int may overflow on long runs)
int                cpu2      = 100;

extern struct mem_pool         *mem;
extern volatile char           *shm;

//--------------------------------------------------
void *thread_test1(void* notusedP) {
//--------------------------------------------------
  hrtime_t start, end, process;
  struct mem_block *mb;
  int i;

  pthread_attr_getcpu_np(&attr1, &cpu1);                            // record which CPU we run on
  pthread_make_periodic_np(pthread_self(), gethrtime(), 10000000);  // 10 ms period

  while (1) {

    pthread_wait_np();
    start = gethrtime();

    // ten allocate/free cycles on a 200-byte block
    for (i = 0; i < 10; i++) {
      mb = get_free_mem_block(200);
      free_mem_block(mb);
    }

    end = gethrtime();
    process = end - start;
    if (process > 1000000) {      // record iterations that took longer than 1 ms
      counter1 += 1;
      duration1 += process;
    }
  }
}
//--------------------------------------------------
void *thread_test2(void* notusedP) {
//--------------------------------------------------
  hrtime_t start, end, process;
  struct mem_block *mb;
  int i;

  pthread_attr_getcpu_np(&attr2, &cpu2);                            // record which CPU we run on
  pthread_make_periodic_np(pthread_self(), gethrtime(), 99000000);  // 99 ms period

  while (1) {
    pthread_wait_np();

    start = gethrtime();

    // ten allocate/free cycles on a 200-byte block
    for (i = 0; i < 10; i++) {
      mb = get_free_mem_block(200);
      free_mem_block(mb);
    }
    end = gethrtime();
    process = end - start;
    if (process > 1000000) {      // record iterations that took longer than 1 ms
      counter2 += 1;
      duration2 += process;
    }
  }
}
//------------------------------------------------------------------------------
int init_module(void) {
//------------------------------------------------------------------------------
  pthread_attr_t     attr;
  struct sched_param sched_param;
  long int           shm_size;
  int                result, i;
  int                ret, index;


    shm_size = sizeof(struct mem_pool);
    printk("shm size = %ld bytes\n", shm_size);
    shm_size = (shm_size & ~((long int)0xFFFF)) + 0x10000;  // round up to a 64 KB multiple
    printk("mbuff_alloc %ld Kbytes\n", shm_size>>10);

    shm = (volatile char*) mbuff_alloc("wcdma",shm_size);

    if(shm == NULL) {
      printk("mbuff_alloc failed\n");
      return(2);
    }
    printk("mbuff_alloc succeed mem_pool=%p\n", shm);
    mem          = (struct mem_pool   *)shm;


    /**********************************/
    /* discovering processor IDs      */
    /**********************************/
    nb_processors = 0;
    printk("Found processor IDs ");       
    for (index=0;(index<32) && (nb_processors < NB_MAX_PROCESSORS); index++) {
      if (rtl_cpu_exists(index)) {
        processor_id[nb_processors++]=index;
        printk("%d ",index);
      }
    }
    printk("\n");
    /**********************************/
    /* init memory management         */
    /**********************************/
    pthread_attr_init (&attr);
    sched_param.sched_priority = 2;
    pthread_attr_setschedparam (&attr, &sched_param);

    printk("creation thread init mem\n");
    ret = pthread_create(&pool_buffer_init_thr, &attr, pool_buffer_init, (void *)mem);
    pthread_join(pool_buffer_init_thr, NULL);
    printk("joined thread init mem\n");

    pthread_attr_init (&attr1);
    if (nb_processors > 1) {
       pthread_attr_setcpu_np(&attr1, processor_id[nb_processors -1]);
    }
    sched_param1.sched_priority = 10;
    pthread_attr_setschedparam (&attr1, &sched_param1);
    ret = pthread_create(&pthread_test1, &attr1, thread_test1,(void *)1);

    pthread_attr_init (&attr2);
    if (nb_processors > 1) {
       pthread_attr_setcpu_np(&attr2, processor_id[nb_processors -2]);
    }
    sched_param2.sched_priority = 10;
    pthread_attr_setschedparam (&attr2, &sched_param2);
    ret = pthread_create(&pthread_test2, &attr2, thread_test2, (void*)1);

    return 0;
}
//------------------------------------------------------------------------------
void cleanup_module(void) {
//------------------------------------------------------------------------------
  pthread_delete_np(pthread_test1);
  pthread_delete_np(pthread_test2);

  printk("COUNTER 1          = %016X\n", counter1);
  printk("CPU 1              = %d\n", cpu1);
  if (counter1) {
    printk("DURATION AVERAGE 1 = %d\n", duration1/counter1);
  }
  printk("COUNTER 2          = %016X\n", counter2);
  printk("CPU 2              = %d\n", cpu2);
  if (counter2) {
    printk("DURATION AVERAGE 2 = %d\n", duration2/counter2);
  }

  if (shm) {
    mbuff_free("wcdma",(void*)shm);
  }
}
//-----------------------------------------------------------------------------
inline void free_mem_block(struct mem_block *leP) {
//-----------------------------------------------------------------------------
    // return the block to the free list of the pool it came from
    add_tail(leP, &mem->mem_lists[leP->pool_id]);
}
//-----------------------------------------------------------------------------
inline struct mem_block* get_free_mem_block(u16 sizeP) {
//-----------------------------------------------------------------------------
  struct mem_block* le;

  // first fit: try the smallest pool whose block size is large enough,
  // falling through to the next larger pool when a free list is empty
  if (sizeP <= MEM_MNGT_MB0_BLOCK_SIZE) {
    if ((le=remove_head( &mem->mem_lists[MEM_MNGT_POOL_ID0]))) {
      return le;
    }
  } 
  if (sizeP <= MEM_MNGT_MB1_BLOCK_SIZE) {
    if ((le=remove_head( &mem->mem_lists[MEM_MNGT_POOL_ID1]))) {
      return le;
    }
  } 
  if (sizeP <= MEM_MNGT_MB2_BLOCK_SIZE) {
    if ((le=remove_head( &mem->mem_lists[MEM_MNGT_POOL_ID2]))) {
      return le;
    }
  }
  // ... and so on for pools 3 through 6 ...
  return NULL;
}
//-----------------------------------------------------------------------------
inline void add_tail(struct mem_block *elementP,
              struct list* listP) {
//-----------------------------------------------------------------------------
  struct mem_block* tail;

  if (elementP != NULL) {
#ifdef TARGET_RT_LINUX
    pthread_mutex_lock(&listP->mutex);
#endif
    elementP->next     = NULL;
    tail = listP->tail;
    // empty list: the new element becomes the head
    if (tail == NULL) {
      listP->head = elementP;
    } else {
      tail->next = elementP;
    }
    listP->tail = elementP;
#ifdef TARGET_RT_LINUX
    pthread_mutex_unlock(&listP->mutex);
#endif
  } else {
    rtl_printf("[RLC][ERROR] add_tail() element NULL\n");
  }
}
//-----------------------------------------------------------------------------
inline struct mem_block* remove_head(struct list* listP) {
//-----------------------------------------------------------------------------
  struct mem_block* head;

#ifdef TARGET_RT_LINUX
  pthread_mutex_lock(&listP->mutex);
#endif  

  head = listP->head;
  // at least one element? if so, detach the head
  if (head != NULL) {
    listP->head = head->next;
    // if only one element, update tail
    if (listP->head == NULL) {
      listP->tail = NULL;
    } else {
      head->next = NULL;
    }
  }
#ifdef TARGET_RT_LINUX
  pthread_mutex_unlock(&listP->mutex);
#endif
  return head;
}
//-----------------------------------------------------------------------------
struct mem_block {
  struct mem_block *next;
  struct mem_block *previous;
  u8           pool_id;
  u8           *data;
};
struct list {
  struct mem_block *head;
  struct mem_block *tail;
  char   name[20];
#ifdef TARGET_RT_LINUX
  pthread_mutex_t mutex;
#endif
};
struct mem_pool {
  //-----------------------------------------------------------
  // basic memory management
  //-----------------------------------------------------------
  u8 mem_pool0[MEM_MNGT_MB0_NB_BLOCKS][MEM_MNGT_MB0_BLOCK_SIZE];
  u8 mem_pool1[MEM_MNGT_MB1_NB_BLOCKS][MEM_MNGT_MB1_BLOCK_SIZE];
  u8 mem_pool2[MEM_MNGT_MB2_NB_BLOCKS][MEM_MNGT_MB2_BLOCK_SIZE];
  u8 mem_pool3[MEM_MNGT_MB3_NB_BLOCKS][MEM_MNGT_MB3_BLOCK_SIZE];
  u8 mem_pool4[MEM_MNGT_MB4_NB_BLOCKS][MEM_MNGT_MB4_BLOCK_SIZE];
  u8 mem_pool5[MEM_MNGT_MB5_NB_BLOCKS][MEM_MNGT_MB5_BLOCK_SIZE];
  u8 mem_pool6[MEM_MNGT_MB6_NB_BLOCKS][MEM_MNGT_MB6_BLOCK_SIZE];
  struct mem_block   mem_blocks[MEM_MNGT_LE_NB_ELEMENTS];
  struct list mem_lists[7];

};
//-----------------------------------------------------------------------------
void init_list(struct list* listP, char* nameP) {
//-----------------------------------------------------------------------------
#ifdef TARGET_RT_LINUX
  pthread_mutexattr_t attr;
  int error_code;
#endif
  u8 i = 0;
  if (nameP) {
    // bounded copy of the list name, NUL-terminated
    while (i < sizeof(listP->name) - 1 && (listP->name[i] = nameP[i]))
      i++;
    listP->name[i] = '\0';
  }
  listP->tail = NULL;
  listP->head = NULL;
#ifdef TARGET_RT_LINUX
  // tried several mutex attributes (setpshared, protocol) without success on the scheduling problem
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
  if ((error_code = pthread_mutex_init(&listP->mutex, &attr))) {
    rtl_printf("[MUTEX][ERROR] init_list(%s) %d\n", nameP, error_code);
  }
#endif
}
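
One piece used in init_module() above but not shown is pool_buffer_init().
For completeness, a hypothetical sketch of what it presumably does, assuming
each mem_block descriptor is bound to one slot of one pool and queued on that
pool's free list (the real function may well differ):

//-----------------------------------------------------------------------------
// HYPOTHETICAL sketch of the elided pool_buffer_init(); not the original code.
//-----------------------------------------------------------------------------
void *pool_buffer_init(void *argP) {
  struct mem_pool  *pool = (struct mem_pool *)argP;
  struct mem_block *mb   = pool->mem_blocks;
  int i;

  // one free list per pool
  for (i = 0; i < 7; i++)
    init_list(&pool->mem_lists[i], "pool");

  // bind the first MEM_MNGT_MB0_NB_BLOCKS descriptors to pool 0 ...
  for (i = 0; i < MEM_MNGT_MB0_NB_BLOCKS; i++, mb++) {
    mb->pool_id = MEM_MNGT_POOL_ID0;
    mb->data    = pool->mem_pool0[i];
    add_tail(mb, &pool->mem_lists[MEM_MNGT_POOL_ID0]);
  }
  // ... and likewise for pools 1 through 6
  return NULL;
}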


