I'm no expert on PPP, but if you're hoping the sequential API is faster
than the raw API, you're going to be disappointed.  It's much slower in
most cases.

Exactly. Your PPP is only as efficient as your link connection and your sio_read function. I've modified my sio_read function several times — it is hard to get it truly right when combining OS queues (e.g., FreeRTOS), interrupts, and flow control. You should check via the debug console how often you receive PPP packets (netif: pppInput) and send packets (netif: pppOutput).

example:
/* Per-port "read in progress" flags. NOTE(review): presumably cleared from
 * another task (or ISR) to abort a blocked read — if so, this should be
 * declared volatile so the check inside the loop is re-read; confirm. */
static char comm_read_blocked[COMM_MAX];

/**
 * Read up to n characters from serial port 'port' into buf.
 *
 * Blocks on the per-port RX character queue. Returns when:
 *   - the buffer is full (n characters read), or
 *   - a queue-receive timeout expires and at least one character has
 *     already been read (partial read), or
 *   - the read is aborted externally (comm_read_blocked[port] cleared
 *     by another context) -> COMM_READABORT, or
 *   - an error occurs (bad buffer size / bad port).
 *
 * When software flow control is active, RTS is re-asserted once the RX
 * queue drains below the port's low-water mark.
 *
 * @param port  port index (must be < COMM_MAX; known values: USART0,
 *              USART1, DUSART)
 * @param buf   destination buffer, at least n bytes
 * @param n     buffer capacity; must be >= 1
 * @return number of characters read (>= 1), or a negative COMM_* error
 *         code (COMM_ERRBUFSIZE, COMM_ERRPORT, COMM_READABORT,
 *         COMM_ERRQUEUE is only logged, not returned)
 */
int comm_read(int port, char *buf, int n)
{
 char line;
 int len;

 if (n < 1) return COMM_ERRBUFSIZE;

 /* Validate port BEFORE any per-port array access. Previously an invalid
  * port wrote out of bounds into comm_read_blocked[] (and indexed
  * xCharsRx[]/xFlowCTL[]) because the only check was the switch default,
  * which is reached only when software flow control is enabled. The array
  * is sized COMM_MAX, so port must be in [0, COMM_MAX). */
 if (port < 0 || port >= COMM_MAX) return COMM_ERRPORT;

 comm_read_blocked[port] = 1;
 comm_blockRx(port);
 len = 0;
 while (1) {         /* readout loop */
  /* Wait for one character (line-framed getc). */
  if (xQueueReceive( xCharsRx[port], buf, comRX_BLOCK_TIME ) != pdTRUE) {
   /* Timeout: check for external abort first, then return any
    * partial data already collected. */
   if (!comm_read_blocked[port]) {
    comm_unblockRx(port);
    return COMM_READABORT;
   }
   if (len != 0) break;   /* partial read complete */
  } else {
   /* End-of-line marker: consume the matching entry from the line
    * counter queue; log (but do not fail) if it is missing. */
   if (*buf == comLINECHAR) {
    if (xQueueReceive( xLinesRx[port], &line, comRX_BLOCK_TIME ) != pdTRUE)
     kprintf("comm_read COMM_ERRQUEUE!\n");
   }

   /* Software flow control: re-assert RTS once the RX queue has
    * drained below the port's low-water mark. */
   if (xFlowCTL[port] == FLOWCTRL_SW) {
    switch(port) {
     case USART0:
      if (uxQueueMessagesWaiting(xCharsRx[USART0]) <= USART0_RX_LOWATER)
       USART0_clrRTS();
      break;

     case USART1:
      if (uxQueueMessagesWaiting(xCharsRx[USART1]) <= USART1_RX_LOWATER)
       USART1_clrRTS();
      break;

     case DUSART:
      break;              /* no flow-control action for DUSART */

     default:
      /* Unreachable now that port is validated up front, but kept
       * as a defensive backstop. */
      comm_unblockRx(port);
      return COMM_ERRPORT;
    } /* switch */
   }
   buf++;
   len++;
   if (len >= n) break;    /* buffer is full */
  }
 }
 comm_unblockRx(port);
 return len;
}

best regards
Janusz U.



_______________________________________________
lwip-users mailing list
[email protected]
http://lists.nongnu.org/mailman/listinfo/lwip-users

Reply via email to