I've improved the ASSERT macro in kernel/include/monitor.h to dump the 
expression that was asserted, as well as the file and line number of the 
failure of the assertion.

I've also fixed kernel/phymem-mon.c to allow physical memory accesses to 
nonexistent physical memory pages, because in real life such an access *is* 
valid and simply has undefined results.  The panic that was previously there 
was commented out, and in the write function, "return;" was added.  The panic 
did not allow my new experimental OS to run, since it probes the amount of 
physical memory by finding the first page that has undefined read/write 
results :)

I am not sure that all the effects of my changes are clear and correct, but 
it works for me.

Please manually incorporate the code differences around the comments 
containing "Eyal Lotem" (my name) into the new version.
/*
 *  plex86: run multiple x86 operating systems concurrently
 *  Copyright (C) 1999-2001 Kevin P. Lawton
 *
 *  phymem-mon.c:  physical memory access code
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include "plex86.h"
#define IN_MONITOR_SPACE
#include "monitor.h"


  void
read_physical(vm_t *vm, Bit32u paddr, unsigned length, void *data_v)
{
  phy_page_usage_t *page_usage;
  Bit32u ppage_index, ppage_offset;
  Bit8u *page_laddr, *data;
  unsigned i, l0, multiple_pages;

  /* Read 'length' bytes of guest physical memory starting at 'paddr'
   * into the caller's buffer 'data_v'.  paddr is the physical address
   * _after_ A20 Enable has been applied.  Accesses may span multiple
   * physical pages; each page is handled in turn below.
   */
  /* +++ should deal with writes to ROM */
  /* +++ should deal with writes to UC mem areas like VGA framebuffer */

  data = data_v;
  ppage_index = paddr >> 12;
  ppage_offset = paddr & 0xfff;

next_page:
  if (ppage_index >= vm->pages.guest_n_pages) {
    /* Reading nonexistent physical memory is not a panic; on real
     * hardware such a read completes with undefined results (commonly
     * all-ones from the floating bus).  Fill the remainder of the
     * caller's buffer with 0xff so it is never left uninitialized —
     * returning without writing anything would hand the caller
     * indeterminate data.  'data' and 'length' already reflect any
     * earlier pages copied, so only the remaining bytes are filled.
     */
    for (i=0; i<length; i++)
      *data++ = 0xff;
    return;
    }
  page_usage = getPageUsage(vm, ppage_index);
  l0 = length;
  multiple_pages = 0;
  if ( (ppage_offset + length) > 4096 ) {
    /* Access crosses a page boundary; handle only this page's part now. */
    l0 = 4096 - ppage_offset;
    multiple_pages = 1;
    }
  if (page_usage->attr.fields.access_perm == PagePermEmulate) {
    Bit32u raw;

    raw = page_usage->attr.raw;
    if (raw & PageUsagePTbl) {
      /* We must update the A&D bits of the page table according with
       * the A&D bits in the actual monitor page tables first.  Then
       * we can let the read occur as usual.
       */
      updateGuestPTbl(vm, ppage_index);
      raw &= ~PageUsagePTbl;
      }

    if (raw & PageUsagePDir) {
      updateGuestPDir(vm, ppage_index);
      raw &= ~PageUsagePDir;
      }
    if (raw & PageUsageVCode) {
      monpanic(vm, "read_phy: PageUsageVCode\n");
      raw &= ~PageUsageVCode;
      }
    if (raw & PageUsageMemMapIO) {
      /* This physical address corresponds to a memory mapped IO
       * device (for example VGA).  Redirect the access to the
       * device emulation
       */
      Bit32u temp_data;
      Bit8u *src;

      /* MemMapIO reads are emulated at most 4 bytes at a time. */
      if (l0 > 4)
        monpanic(vm, "read_phy: MemMapIO access > 4.\n");
      temp_data = sysMemMapIORead(vm, (ppage_index<<12) | ppage_offset, l0);
      src = (Bit8u *) &temp_data;
      for (i=0; i<l0; i++) {
        *data++ = *src++;
        }
      raw &= ~PageUsageMemMapIO;
      goto incr_next_page;
      }
    }
  /* Plain RAM: map the guest page into the monitor and copy from it. */
  page_laddr = open_guest_phy_page(vm, ppage_index,
                                   vm->guest.addr.tmp_phy_page0);
  for (i=0; i<l0; i++)
    *data++ = page_laddr[ppage_offset++];

incr_next_page:
  if (!multiple_pages)
    return;
  /* Advance to the next physical page (with A20 wrap applied). */
  length -= l0;
  ppage_index = A20PageIndex(vm, ppage_index + 1);
  ppage_offset = 0;
  goto next_page;
}

  void
write_physical(vm_t *vm, Bit32u paddr, unsigned length, void *data_v)
{
  phy_page_usage_t *usage;
  Bit32u page_num, offset;
  Bit8u *mapped, *src;
  unsigned i, chunk;

  /* Write 'length' bytes from the caller's buffer 'data_v' into guest
   * physical memory starting at 'paddr' (post-A20 address).  The access
   * may span several pages; one page is processed per loop iteration.
   * +++ see notes for read_physical
   */

  src = data_v;
  page_num = paddr >> 12;
  offset = paddr & 0xfff;

  for (;;) {
    unsigned spans_pages, handled_mmio;

    if (page_num >= vm->pages.guest_n_pages) {
      /* Writing to nonexistent physical memory is not a panic; such a
       * write simply has no visible effect (undefined results on real
       * hardware), so discard it silently. */
      return;
      }
    usage = getPageUsage(vm, page_num);

    chunk = length;
    spans_pages = 0;
    if ( (offset + length) > 4096 ) {
      /* Crosses a page boundary: restrict to the part in this page. */
      chunk = 4096 - offset;
      spans_pages = 1;
      }

    handled_mmio = 0;
    if (usage->attr.fields.access_perm != PagePermRW) {
      Bit32u raw = usage->attr.raw;

      if (raw & PageUsagePTbl) {
        /* Propagate the A&D bits from the actual monitor page tables
         * into the guest page table before the write lands, and
         * schedule a monitor page table rebuild since it changed. */
        updateGuestPTbl(vm, page_num);
        vm->modeChange |= ModeChangeEventPaging | ModeChangeRequestPaging;
        raw &= ~PageUsagePTbl;
        }
      if (raw & PageUsagePDir) {
        updateGuestPDir(vm, page_num);
        vm->modeChange |= ModeChangeEventPaging | ModeChangeRequestPaging;
        raw &= ~PageUsagePDir;
        }
      if (raw & PageUsageVCode) {
        /* NOTE(review): uses paddr>>12 — the first page of the access —
         * even on later iterations of a multi-page write; preserved
         * as-is, but confirm this is intended. */
        removePageAttributes(vm, paddr>>12, PageUsageVCode);
        raw &= ~PageUsageVCode;
        }
      if (raw & PageUsageMemMapIO) {
        /* Memory-mapped IO (e.g. VGA framebuffer): hand the bytes to
         * the device emulation instead of touching RAM. */
        Bit32u value;
        Bit8u *dst;

        if (chunk > 4)
          monpanic(vm, "write_phy: MemMapIO access > 4.\n");
        value = 0; /* zero out initial value */
        dst = (Bit8u *) &value;
        for (i=0; i<chunk; i++) {
          *dst++ = *src++;
          }
        sysMemMapIOWrite(vm, (page_num<<12) | offset, chunk, value);
        raw &= ~PageUsageMemMapIO;
        handled_mmio = 1;
        }
      /*monprint(vm, "raw=0x%x\n", usage->attr.raw); */
      }

    if (!handled_mmio) {
      /* Plain RAM: map the guest page into the monitor and copy into it. */
      mapped = open_guest_phy_page(vm, page_num,
                                   vm->guest.addr.tmp_phy_page0);
      for (i=0; i<chunk; i++)
        mapped[offset++] = *src++;
      }

    if (!spans_pages)
      return;
    /* Advance to the next physical page (with A20 wrap applied). */
    length -= chunk;
    page_num = A20PageIndex(vm, page_num + 1);
    offset = 0;
    }
}

Reply via email to