Hello all,
Here's an initial port of Adeos and RTAI/fusion to 64-bit PowerPC that Taneli
Vähäkangas and I have been working on. Unfortunately the port isn't
fully functional yet, but due to time constraints I have to shove it
out now ("kunhan hänessä on" -- Finnish, roughly "as long as there's
something to it"). It was developed and tested on a G5 PowerMac; the
IBM iSeries code is untouched. Current status:
* compiles - makefile structure should be ok
* Linux/adeos boots
* fusion modules load and unload
* fusion timers probably don't work
* fusion fpu support is untested and probably doesn't work
* A hello world rt thread seems to work :)
* much of the code could be merged with the ppc32 port, from which it
differs very little, but it should probably be stabilized first
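
For reference, the kind of "hello world" RT thread mentioned above
looks roughly like the sketch below. This is only illustrative -- it
assumes the fusion native-skin kernel API (rt_task_create/rt_task_start)
and the <rtai/task.h> header path, which may differ in your tree:

  #include <linux/module.h>
  #include <rtai/task.h>  /* assumed fusion native skin header */

  static RT_TASK hello_task;

  static void hello_body (void *cookie)
  {
      printk("hello from a fusion RT thread on ppc64\n");
  }

  static int __init hello_init (void)
  {
      /* 8k stack, priority 10, no special mode bits (assumed args) */
      int err = rt_task_create(&hello_task,"hello",8192,10,0);
      if (err)
          return err;
      return rt_task_start(&hello_task,&hello_body,NULL);
  }

  static void __exit hello_exit (void)
  {
      rt_task_delete(&hello_task);
  }

  module_init(hello_init);
  module_exit(hello_exit);
  MODULE_LICENSE("GPL");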
Regards,
Heikki Lindholm
diff -Nru fusion-0.7.1/GNUmakefile.in fusion-0.7.1-ppc64-devel/GNUmakefile.in
--- fusion-0.7.1/GNUmakefile.in 2005-04-02 17:26:28.000000000 +0300
+++ fusion-0.7.1-ppc64-devel/GNUmakefile.in 2005-05-16 15:19:31.000000000 +0300
@@ -93,6 +93,8 @@
CONFIG_LTT_TRUE = @CONFIG_LTT_TRUE@
CONFIG_PPC_FALSE = @CONFIG_PPC_FALSE@
CONFIG_PPC_TRUE = @CONFIG_PPC_TRUE@
+CONFIG_PPC64_FALSE = @CONFIG_PPC64_FALSE@
+CONFIG_PPC64_TRUE = @CONFIG_PPC64_TRUE@
CONFIG_RTAI_DOC_DOX_FALSE = @CONFIG_RTAI_DOC_DOX_FALSE@
CONFIG_RTAI_DOC_DOX_TRUE = @CONFIG_RTAI_DOC_DOX_TRUE@
CONFIG_RTAI_DRIVERS_16550A_FALSE = @CONFIG_RTAI_DRIVERS_16550A_FALSE@
diff -Nru fusion-0.7.1/arch/GNUmakefile.am fusion-0.7.1-ppc64-devel/arch/GNUmakefile.am
--- fusion-0.7.1/arch/GNUmakefile.am 2004-12-24 11:12:11.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/GNUmakefile.am 2005-05-16 14:55:37.000000000 +0300
@@ -6,4 +6,8 @@
ARCHDIR = ppc
endif
+if CONFIG_PPC64
+ARCHDIR = ppc64
+endif
+
SUBDIRS = $(ARCHDIR)
diff -Nru fusion-0.7.1/arch/GNUmakefile.in fusion-0.7.1-ppc64-devel/arch/GNUmakefile.in
--- fusion-0.7.1/arch/GNUmakefile.in 2005-04-02 17:26:21.000000000 +0300
+++ fusion-0.7.1-ppc64-devel/arch/GNUmakefile.in 2005-05-16 14:57:16.000000000 +0300
@@ -55,7 +55,7 @@
uninstall-recursive
ETAGS = etags
CTAGS = ctags
-DIST_SUBDIRS = ppc i386
+DIST_SUBDIRS = ppc ppc64 i386
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMDEP_FALSE = @AMDEP_FALSE@
@@ -267,6 +267,7 @@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
@CONFIG_PPC_TRUE@ARCHDIR = ppc
+@CONFIG_PPC64_TRUE@ARCHDIR = ppc64
@CONFIG_X86_TRUE@ARCHDIR = i386
SUBDIRS = $(ARCHDIR)
all: all-recursive
diff -Nru fusion-0.7.1/arch/ppc64/GNUmakefile.am fusion-0.7.1-ppc64-devel/arch/ppc64/GNUmakefile.am
--- fusion-0.7.1/arch/ppc64/GNUmakefile.am 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/GNUmakefile.am 2004-12-24 11:12:11.000000000 +0200
@@ -0,0 +1,3 @@
+SUBDIRS = hal
+
+EXTRA_DIST = Kconfig defconfig patches
diff -Nru fusion-0.7.1/arch/ppc64/GNUmakefile.in fusion-0.7.1-ppc64-devel/arch/ppc64/GNUmakefile.in
--- fusion-0.7.1/arch/ppc64/GNUmakefile.in 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/GNUmakefile.in 2005-05-16 15:26:20.000000000 +0300
@@ -0,0 +1,569 @@
+# GNUmakefile.in generated by automake 1.9.2 from GNUmakefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+top_builddir = ../..
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+INSTALL = @INSTALL@
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = arch/ppc64
+DIST_COMMON = $(srcdir)/GNUmakefile.am $(srcdir)/GNUmakefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/config/autoconf/docbook.m4 \
+ $(top_srcdir)/configure.in
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(SHELL) $(top_srcdir)/config/autoconf/mkinstalldirs
+CONFIG_HEADER = $(top_builddir)/rtai_config.h
+CONFIG_CLEAN_FILES =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
+ html-recursive info-recursive install-data-recursive \
+ install-exec-recursive install-info-recursive \
+ install-recursive installcheck-recursive installdirs-recursive \
+ pdf-recursive ps-recursive uninstall-info-recursive \
+ uninstall-recursive
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMDEP_FALSE = @AMDEP_FALSE@
+AMDEP_TRUE = @AMDEP_TRUE@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CONFIG_LTT_FALSE = @CONFIG_LTT_FALSE@
+CONFIG_LTT_TRUE = @CONFIG_LTT_TRUE@
+CONFIG_PPC64_FALSE = @CONFIG_PPC64_FALSE@
+CONFIG_PPC64_TRUE = @CONFIG_PPC64_TRUE@
+CONFIG_RTAI_DOC_DOX_FALSE = @CONFIG_RTAI_DOC_DOX_FALSE@
+CONFIG_RTAI_DOC_DOX_TRUE = @CONFIG_RTAI_DOC_DOX_TRUE@
+CONFIG_RTAI_DRIVERS_16550A_FALSE = @CONFIG_RTAI_DRIVERS_16550A_FALSE@
+CONFIG_RTAI_DRIVERS_16550A_TRUE = @CONFIG_RTAI_DRIVERS_16550A_TRUE@
+CONFIG_RTAI_HW_FPU_FALSE = @CONFIG_RTAI_HW_FPU_FALSE@
+CONFIG_RTAI_HW_FPU_TRUE = @CONFIG_RTAI_HW_FPU_TRUE@
+CONFIG_RTAI_HW_SMI_DETECT_FALSE = @CONFIG_RTAI_HW_SMI_DETECT_FALSE@
+CONFIG_RTAI_HW_SMI_DETECT_TRUE = @CONFIG_RTAI_HW_SMI_DETECT_TRUE@
+CONFIG_RTAI_MAINT_FALSE = @CONFIG_RTAI_MAINT_FALSE@
+CONFIG_RTAI_MAINT_GCH_FALSE = @CONFIG_RTAI_MAINT_GCH_FALSE@
+CONFIG_RTAI_MAINT_GCH_TRUE = @CONFIG_RTAI_MAINT_GCH_TRUE@
+CONFIG_RTAI_MAINT_PGM_FALSE = @CONFIG_RTAI_MAINT_PGM_FALSE@
+CONFIG_RTAI_MAINT_PGM_TRUE = @CONFIG_RTAI_MAINT_PGM_TRUE@
+CONFIG_RTAI_MAINT_TRUE = @CONFIG_RTAI_MAINT_TRUE@
+CONFIG_RTAI_OLD_FASHIONED_BUILD_FALSE = @CONFIG_RTAI_OLD_FASHIONED_BUILD_FALSE@
+CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE = @CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE@
+CONFIG_RTAI_OPT_FUSION_FALSE = @CONFIG_RTAI_OPT_FUSION_FALSE@
+CONFIG_RTAI_OPT_FUSION_TRUE = @CONFIG_RTAI_OPT_FUSION_TRUE@
+CONFIG_RTAI_OPT_NATIVE_ALARM_FALSE = @CONFIG_RTAI_OPT_NATIVE_ALARM_FALSE@
+CONFIG_RTAI_OPT_NATIVE_ALARM_TRUE = @CONFIG_RTAI_OPT_NATIVE_ALARM_TRUE@
+CONFIG_RTAI_OPT_NATIVE_COND_FALSE = @CONFIG_RTAI_OPT_NATIVE_COND_FALSE@
+CONFIG_RTAI_OPT_NATIVE_COND_TRUE = @CONFIG_RTAI_OPT_NATIVE_COND_TRUE@
+CONFIG_RTAI_OPT_NATIVE_EVENT_FALSE = @CONFIG_RTAI_OPT_NATIVE_EVENT_FALSE@
+CONFIG_RTAI_OPT_NATIVE_EVENT_TRUE = @CONFIG_RTAI_OPT_NATIVE_EVENT_TRUE@
+CONFIG_RTAI_OPT_NATIVE_HEAP_FALSE = @CONFIG_RTAI_OPT_NATIVE_HEAP_FALSE@
+CONFIG_RTAI_OPT_NATIVE_HEAP_TRUE = @CONFIG_RTAI_OPT_NATIVE_HEAP_TRUE@
+CONFIG_RTAI_OPT_NATIVE_INTR_FALSE = @CONFIG_RTAI_OPT_NATIVE_INTR_FALSE@
+CONFIG_RTAI_OPT_NATIVE_INTR_TRUE = @CONFIG_RTAI_OPT_NATIVE_INTR_TRUE@
+CONFIG_RTAI_OPT_NATIVE_MUTEX_FALSE = @CONFIG_RTAI_OPT_NATIVE_MUTEX_FALSE@
+CONFIG_RTAI_OPT_NATIVE_MUTEX_TRUE = @CONFIG_RTAI_OPT_NATIVE_MUTEX_TRUE@
+CONFIG_RTAI_OPT_NATIVE_PIPE_FALSE = @CONFIG_RTAI_OPT_NATIVE_PIPE_FALSE@
+CONFIG_RTAI_OPT_NATIVE_PIPE_TRUE = @CONFIG_RTAI_OPT_NATIVE_PIPE_TRUE@
+CONFIG_RTAI_OPT_NATIVE_QUEUE_FALSE = @CONFIG_RTAI_OPT_NATIVE_QUEUE_FALSE@
+CONFIG_RTAI_OPT_NATIVE_QUEUE_TRUE = @CONFIG_RTAI_OPT_NATIVE_QUEUE_TRUE@
+CONFIG_RTAI_OPT_NATIVE_REGISTRY_FALSE = @CONFIG_RTAI_OPT_NATIVE_REGISTRY_FALSE@
+CONFIG_RTAI_OPT_NATIVE_REGISTRY_TRUE = @CONFIG_RTAI_OPT_NATIVE_REGISTRY_TRUE@
+CONFIG_RTAI_OPT_NATIVE_SEM_FALSE = @CONFIG_RTAI_OPT_NATIVE_SEM_FALSE@
+CONFIG_RTAI_OPT_NATIVE_SEM_TRUE = @CONFIG_RTAI_OPT_NATIVE_SEM_TRUE@
+CONFIG_RTAI_OPT_PIPE_FALSE = @CONFIG_RTAI_OPT_PIPE_FALSE@
+CONFIG_RTAI_OPT_PIPE_TRUE = @CONFIG_RTAI_OPT_PIPE_TRUE@
+CONFIG_RTAI_OPT_UVM_FALSE = @CONFIG_RTAI_OPT_UVM_FALSE@
+CONFIG_RTAI_OPT_UVM_TRUE = @CONFIG_RTAI_OPT_UVM_TRUE@
+CONFIG_RTAI_SKIN_NATIVE_FALSE = @CONFIG_RTAI_SKIN_NATIVE_FALSE@
+CONFIG_RTAI_SKIN_NATIVE_TRUE = @CONFIG_RTAI_SKIN_NATIVE_TRUE@
+CONFIG_RTAI_SKIN_POSIX_FALSE = @CONFIG_RTAI_SKIN_POSIX_FALSE@
+CONFIG_RTAI_SKIN_POSIX_TRUE = @CONFIG_RTAI_SKIN_POSIX_TRUE@
+CONFIG_RTAI_SKIN_PSOS_FALSE = @CONFIG_RTAI_SKIN_PSOS_FALSE@
+CONFIG_RTAI_SKIN_PSOS_TRUE = @CONFIG_RTAI_SKIN_PSOS_TRUE@
+CONFIG_RTAI_SKIN_UITRON_FALSE = @CONFIG_RTAI_SKIN_UITRON_FALSE@
+CONFIG_RTAI_SKIN_UITRON_TRUE = @CONFIG_RTAI_SKIN_UITRON_TRUE@
+CONFIG_RTAI_SKIN_VRTX_FALSE = @CONFIG_RTAI_SKIN_VRTX_FALSE@
+CONFIG_RTAI_SKIN_VRTX_TRUE = @CONFIG_RTAI_SKIN_VRTX_TRUE@
+CONFIG_RTAI_SKIN_VXWORKS_FALSE = @CONFIG_RTAI_SKIN_VXWORKS_FALSE@
+CONFIG_RTAI_SKIN_VXWORKS_TRUE = @CONFIG_RTAI_SKIN_VXWORKS_TRUE@
+CONFIG_SMP_FALSE = @CONFIG_SMP_FALSE@
+CONFIG_SMP_TRUE = @CONFIG_SMP_TRUE@
+CONFIG_X86_FALSE = @CONFIG_X86_FALSE@
+CONFIG_X86_LOCAL_APIC_FALSE = @CONFIG_X86_LOCAL_APIC_FALSE@
+CONFIG_X86_LOCAL_APIC_TRUE = @CONFIG_X86_LOCAL_APIC_TRUE@
+CONFIG_X86_TRUE = @CONFIG_X86_TRUE@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CROSS_COMPILE = @CROSS_COMPILE@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DBX_ABS_SRCDIR_FALSE = @DBX_ABS_SRCDIR_FALSE@
+DBX_ABS_SRCDIR_TRUE = @DBX_ABS_SRCDIR_TRUE@
+DBX_DOC_FALSE = @DBX_DOC_FALSE@
+DBX_DOC_ROOT = @DBX_DOC_ROOT@
+DBX_DOC_TRUE = @DBX_DOC_TRUE@
+DBX_FOP = @DBX_FOP@
+DBX_GEN_DOC_ROOT = @DBX_GEN_DOC_ROOT@
+DBX_LINT = @DBX_LINT@
+DBX_MAYBE_NONET = @DBX_MAYBE_NONET@
+DBX_ROOT = @DBX_ROOT@
+DBX_XSLTPROC = @DBX_XSLTPROC@
+DBX_XSL_ROOT = @DBX_XSL_ROOT@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DOXYGEN = @DOXYGEN@
+DOXYGEN_HAVE_DOT = @DOXYGEN_HAVE_DOT@
+DOXYGEN_SHOW_INCLUDE_FILES = @DOXYGEN_SHOW_INCLUDE_FILES@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FFLAGS = @FFLAGS@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LATEX_BATCHMODE = @LATEX_BATCHMODE@
+LATEX_MODE = @LATEX_MODE@
+LDFLAGS = @LDFLAGS@
+LEX = @LEX@
+LEXLIB = @LEXLIB@
+LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
+MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
+MAKEINFO = @MAKEINFO@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RTAI_BUILD_STRING = @RTAI_BUILD_STRING@
+RTAI_FP_CFLAGS = @RTAI_FP_CFLAGS@
+RTAI_HOST_STRING = @RTAI_HOST_STRING@
+RTAI_KBUILD_BOTTOM = @RTAI_KBUILD_BOTTOM@
+RTAI_KBUILD_CLEAN = @RTAI_KBUILD_CLEAN@
+RTAI_KBUILD_CMD = @RTAI_KBUILD_CMD@
+RTAI_KBUILD_DISTCLEAN = @RTAI_KBUILD_DISTCLEAN@
+RTAI_KBUILD_ENV = @RTAI_KBUILD_ENV@
+RTAI_KBUILD_TOP = @RTAI_KBUILD_TOP@
+RTAI_KMOD_APP_CFLAGS = @RTAI_KMOD_APP_CFLAGS@
+RTAI_KMOD_CFLAGS = @RTAI_KMOD_CFLAGS@
+RTAI_LINUX_DIR = @RTAI_LINUX_DIR@
+RTAI_LINUX_VERSION = @RTAI_LINUX_VERSION@
+RTAI_MAYBE_DOCDIR = @RTAI_MAYBE_DOCDIR@
+RTAI_MAYBE_SIMDIR = @RTAI_MAYBE_SIMDIR@
+RTAI_MODULE_DIR = @RTAI_MODULE_DIR@
+RTAI_MODULE_EXT = @RTAI_MODULE_EXT@
+RTAI_PIPE_NRDEV = @RTAI_PIPE_NRDEV@
+RTAI_TARGET_ARCH = @RTAI_TARGET_ARCH@
+RTAI_TARGET_SUBARCH = @RTAI_TARGET_SUBARCH@
+RTAI_USER_APP_CFLAGS = @RTAI_USER_APP_CFLAGS@
+RTAI_USER_CFLAGS = @RTAI_USER_CFLAGS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+ac_ct_RANLIB = @ac_ct_RANLIB@
+ac_ct_STRIP = @ac_ct_STRIP@
+am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
+am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
+am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
+am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+datadir = @datadir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+subdirs = @subdirs@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+SUBDIRS = hal
+EXTRA_DIST = Kconfig defconfig patches
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/GNUmakefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/GNUmakefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign arch/ppc64/GNUmakefile'; \
+ cd $(top_srcdir) && \
+ $(AUTOMAKE) --foreign arch/ppc64/GNUmakefile
+.PRECIOUS: GNUmakefile
+GNUmakefile: $(srcdir)/GNUmakefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+distclean-libtool:
+ -rm -f libtool
+uninstall-info-am:
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+# (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+$(RECURSIVE_TARGETS):
+ @set fnord $$MAKEFLAGS; amf=$$2; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+mostlyclean-recursive clean-recursive distclean-recursive \
+maintainer-clean-recursive:
+ @set fnord $$MAKEFLAGS; amf=$$2; \
+ dot_seen=no; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ rev=''; for subdir in $$list; do \
+ if test "$$subdir" = "."; then :; else \
+ rev="$$subdir $$rev"; \
+ fi; \
+ done; \
+ rev="$$rev ."; \
+ target=`echo $@ | sed s/-recursive//`; \
+ for subdir in $$rev; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
+ done && test -z "$$fail"
+tags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags);
\
+ done
+ctags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS)
ctags); \
+ done
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$tags $$unique; \
+ fi
+ctags: CTAGS
+CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(CTAGS_ARGS)$$tags$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$tags $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && cd $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
+ list='$(DISTFILES)'; for file in $$list; do \
+ case $$file in \
+ $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
+ $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
+ esac; \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test "$$dir" != "$$file" && test "$$dir" != "."; then \
+ dir="/$$dir"; \
+ $(mkdir_p) "$(distdir)$$dir"; \
+ else \
+ dir=''; \
+ fi; \
+ if test -d $$d/$$file; then \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+ fi; \
+ cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+ else \
+ test -f $(distdir)/$$file \
+ || cp -p $$d/$$file $(distdir)/$$file \
+ || exit 1; \
+ fi; \
+ done
+ list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test -d "$(distdir)/$$subdir" \
+ || $(mkdir_p) "$(distdir)/$$subdir" \
+ || exit 1; \
+ distdir=`$(am__cd) $(distdir) && pwd`; \
+ top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
+ (cd $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$top_distdir" \
+ distdir="$$distdir/$$subdir" \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: GNUmakefile
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f GNUmakefile
+distclean-am: clean-am distclean-generic distclean-libtool \
+ distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-exec-am:
+
+install-info: install-info-recursive
+
+install-man:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f GNUmakefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am: uninstall-info-am
+
+uninstall-info: uninstall-info-recursive
+
+.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \
+ clean clean-generic clean-libtool clean-recursive ctags \
+ ctags-recursive distclean distclean-generic distclean-libtool \
+ distclean-recursive distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-exec install-exec-am install-info \
+ install-info-am install-man install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic maintainer-clean-recursive \
+ mostlyclean mostlyclean-generic mostlyclean-libtool \
+ mostlyclean-recursive pdf pdf-am ps ps-am tags tags-recursive \
+ uninstall uninstall-am uninstall-info-am
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff -Nru fusion-0.7.1/arch/ppc64/Kconfig fusion-0.7.1-ppc64-devel/arch/ppc64/Kconfig
--- fusion-0.7.1/arch/ppc64/Kconfig 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/Kconfig 2005-05-16 15:35:18.000000000 +0300
@@ -0,0 +1,70 @@
+mainmenu "RTAI/powerpc64 configuration"
+
+source Kconfig
+
+source "nucleus/Kconfig"
+
+menu "Machine (powerpc64)"
+
+config RTAI_HW_FPU
+ bool "Enable FPU support"
+ default y
+ help
+ The FPU executes instructions from the processor's normal
+ instruction stream. It can handle the types of high-precision
+ floating-point processing operations commonly found in
+ scientific, engineering, and business applications. Enabling
+ FPU support on a platform providing this hardware component
+ may greatly improve performance.
+
+config RTAI_HW_APERIODIC_TIMER
+ bool "Enable aperiodic timer support"
+ default y
+ help
+ The nucleus can handle aperiodic timers that can be
+ programmed in one-shot mode on this architecture. In this
+ mode, timing accuracy is higher - since it is not rounded to a
+ constant time slice - at the expense of lower efficiency
+ when many timers are simultaneously active. The aperiodic mode
+ gives better results in configurations involving a few threads
+ requesting timing services over different time scales that
+ cannot be easily expressed as multiples of a single base tick,
+ or would lead to a waste of high-frequency periodic
+ ticks. You can disable this support for this architecture to
+ save a few hundred bytes if you plan to use the system timer
+ in periodic mode only.
+
+config RTAI_HW_TIMER_LATENCY
+ depends on RTAI_OPT_EXPERT
+ string "Timer tuning latency (ns)"
+ default 0
+ help
+ This parameter accounts for the time (in nanoseconds) needed
+ to program the underlying time source in one-shot timing mode.
+ This value will be used to reduce the scheduling jitter induced
+ by the time needed to set up the timer for its next shot. A
+ default value of 0 (recommended) will cause this value to be
+ estimated by the nucleus at startup.
+
+config RTAI_HW_SCHED_LATENCY
+ depends on RTAI_OPT_EXPERT
+ string "Scheduling latency (ns)"
+ default 0
+ help
+ Scheduling latency is the time between the termination of an
+ interrupt handler and the execution of the first instruction
+ of the real-time thread this handler resumes. A
+ default value of 0 (recommended) will cause this value to be
+ estimated by the nucleus at startup.
+
+endmenu
+
+source "skins/Kconfig"
+
+menu "Drivers"
+
+source "drivers/Kconfig"
+
+endmenu
+
+source "sim/Kconfig"
diff -Nru fusion-0.7.1/arch/ppc64/defconfig fusion-0.7.1-ppc64-devel/arch/ppc64/defconfig
--- fusion-0.7.1/arch/ppc64/defconfig 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/defconfig 2005-05-16 15:25:05.000000000 +0300
@@ -0,0 +1,66 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_MODULES=y
+CONFIG_RTAI_VERSION="0.7 (fusion)"
+
+#
+# General
+#
+CONFIG_RTAI_INSTALLDIR="/usr/realtime"
+CONFIG_RTAI_LINUXDIR="/lib/modules/`uname -r`/build"
+
+#
+# Documentation
+#
+# CONFIG_RTAI_DOC_DOX is not set
+# CONFIG_RTAI_DOC_LATEX_NONSTOP is not set
+# CONFIG_RTAI_DOC_DBX is not set
+# CONFIG_RTAI_OPT_EXPERT is not set
+
+#
+# Nucleus
+#
+CONFIG_RTAI_OPT_FUSION=y
+CONFIG_RTAI_OPT_PIPE=y
+CONFIG_RTAI_OPT_PIPE_NRDEV="32"
+CONFIG_RTAI_OPT_PERCPU_TIMER=y
+
+#
+# Machine (powerpc64)
+#
+CONFIG_RTAI_HW_FPU=y
+CONFIG_RTAI_HW_APERIODIC_TIMER=y
+
+#
+# APIs
+#
+CONFIG_RTAI_SKIN_NATIVE=y
+CONFIG_RTAI_OPT_NATIVE_REGISTRY=y
+CONFIG_RTAI_OPT_NATIVE_REGISTRY_NRSLOTS="512"
+CONFIG_RTAI_OPT_NATIVE_PIPE=y
+CONFIG_RTAI_OPT_NATIVE_PIPE_BUFSZ="4096"
+CONFIG_RTAI_OPT_NATIVE_SEM=y
+CONFIG_RTAI_OPT_NATIVE_EVENT=y
+CONFIG_RTAI_OPT_NATIVE_MUTEX=y
+CONFIG_RTAI_OPT_NATIVE_COND=y
+CONFIG_RTAI_OPT_NATIVE_QUEUE=y
+CONFIG_RTAI_OPT_NATIVE_HEAP=y
+CONFIG_RTAI_OPT_NATIVE_ALARM=y
+CONFIG_RTAI_OPT_NATIVE_INTR=y
+# CONFIG_RTAI_SKIN_POSIX is not set
+# CONFIG_RTAI_SKIN_PSOS is not set
+# CONFIG_RTAI_SKIN_UITRON is not set
+# CONFIG_RTAI_SKIN_VRTX is not set
+# CONFIG_RTAI_SKIN_VXWORKS is not set
+# CONFIG_RTAI_OPT_UVM is not set
+
+#
+# Drivers
+#
+# CONFIG_RTAI_DRIVERS_16550A is not set
+
+#
+# Simulator
+#
+# CONFIG_RTAI_MVM is not set
diff -Nru fusion-0.7.1/arch/ppc64/hal/GNUmakefile.am fusion-0.7.1-ppc64-devel/arch/ppc64/hal/GNUmakefile.am
--- fusion-0.7.1/arch/ppc64/hal/GNUmakefile.am 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/hal/GNUmakefile.am 2004-12-24 11:12:11.000000000 +0200
@@ -0,0 +1,34 @@
+moduledir = $(DESTDIR)@RTAI_MODULE_DIR@
+
+modext = @RTAI_MODULE_EXT@
+
+CROSS_COMPILE = @CROSS_COMPILE@
+
+libhal_SRC = hal.c switch.S
+
+if CONFIG_RTAI_HW_FPU
+libhal_SRC += fpu.S
+endif
+
+rtai_hal.ko: @RTAI_KBUILD_ENV@
+rtai_hal.ko: $(libhal_SRC) FORCE
+ @RTAI_KBUILD_TOP@ \
+ @RTAI_KBUILD_CMD@ rtai_extradef="@RTAI_KMOD_CFLAGS@" \
+ @RTAI_KBUILD_BOTTOM@
+
+clean-local:
+ @RTAI_KBUILD_CLEAN@
+
+all-local: rtai_hal$(modext)
+if CONFIG_RTAI_OLD_FASHIONED_BUILD
+ $(mkinstalldirs) $(top_srcdir)/modules
+ $(INSTALL_DATA) $^ $(top_srcdir)/modules
+endif
+
+install-exec-local: rtai_hal$(modext)
+ $(mkinstalldirs) $(moduledir)
+ $(INSTALL_DATA) $< $(moduledir)
+
+.PHONY: FORCE
+
+EXTRA_DIST = $(libhal_SRC) fpu.S Makefile.kbuild
diff -Nru fusion-0.7.1/arch/ppc64/hal/GNUmakefile.in fusion-0.7.1-ppc64-devel/arch/ppc64/hal/GNUmakefile.in
--- fusion-0.7.1/arch/ppc64/hal/GNUmakefile.in 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/hal/GNUmakefile.in 2005-05-16 15:32:58.000000000 +0300
@@ -0,0 +1,447 @@
+# GNUmakefile.in generated by automake 1.9.2 from GNUmakefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+top_builddir = ../../..
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+INSTALL = @INSTALL@
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+@CONFIG_RTAI_HW_FPU_TRUE@am__append_1 = fpu.S
+subdir = arch/ppc64/hal
+DIST_COMMON = $(srcdir)/GNUmakefile.am $(srcdir)/GNUmakefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/config/autoconf/docbook.m4 \
+ $(top_srcdir)/configure.in
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(SHELL) $(top_srcdir)/config/autoconf/mkinstalldirs
+CONFIG_HEADER = $(top_builddir)/rtai_config.h
+CONFIG_CLEAN_FILES =
+SOURCES =
+DIST_SOURCES =
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMDEP_FALSE = @AMDEP_FALSE@
+AMDEP_TRUE = @AMDEP_TRUE@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CONFIG_LTT_FALSE = @CONFIG_LTT_FALSE@
+CONFIG_LTT_TRUE = @CONFIG_LTT_TRUE@
+CONFIG_PPC64_FALSE = @CONFIG_PPC64_FALSE@
+CONFIG_PPC64_TRUE = @CONFIG_PPC64_TRUE@
+CONFIG_RTAI_DOC_DOX_FALSE = @CONFIG_RTAI_DOC_DOX_FALSE@
+CONFIG_RTAI_DOC_DOX_TRUE = @CONFIG_RTAI_DOC_DOX_TRUE@
+CONFIG_RTAI_DRIVERS_16550A_FALSE = @CONFIG_RTAI_DRIVERS_16550A_FALSE@
+CONFIG_RTAI_DRIVERS_16550A_TRUE = @CONFIG_RTAI_DRIVERS_16550A_TRUE@
+CONFIG_RTAI_HW_FPU_FALSE = @CONFIG_RTAI_HW_FPU_FALSE@
+CONFIG_RTAI_HW_FPU_TRUE = @CONFIG_RTAI_HW_FPU_TRUE@
+CONFIG_RTAI_HW_SMI_DETECT_FALSE = @CONFIG_RTAI_HW_SMI_DETECT_FALSE@
+CONFIG_RTAI_HW_SMI_DETECT_TRUE = @CONFIG_RTAI_HW_SMI_DETECT_TRUE@
+CONFIG_RTAI_MAINT_FALSE = @CONFIG_RTAI_MAINT_FALSE@
+CONFIG_RTAI_MAINT_GCH_FALSE = @CONFIG_RTAI_MAINT_GCH_FALSE@
+CONFIG_RTAI_MAINT_GCH_TRUE = @CONFIG_RTAI_MAINT_GCH_TRUE@
+CONFIG_RTAI_MAINT_PGM_FALSE = @CONFIG_RTAI_MAINT_PGM_FALSE@
+CONFIG_RTAI_MAINT_PGM_TRUE = @CONFIG_RTAI_MAINT_PGM_TRUE@
+CONFIG_RTAI_MAINT_TRUE = @CONFIG_RTAI_MAINT_TRUE@
+CONFIG_RTAI_OLD_FASHIONED_BUILD_FALSE = @CONFIG_RTAI_OLD_FASHIONED_BUILD_FALSE@
+CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE = @CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE@
+CONFIG_RTAI_OPT_FUSION_FALSE = @CONFIG_RTAI_OPT_FUSION_FALSE@
+CONFIG_RTAI_OPT_FUSION_TRUE = @CONFIG_RTAI_OPT_FUSION_TRUE@
+CONFIG_RTAI_OPT_NATIVE_ALARM_FALSE = @CONFIG_RTAI_OPT_NATIVE_ALARM_FALSE@
+CONFIG_RTAI_OPT_NATIVE_ALARM_TRUE = @CONFIG_RTAI_OPT_NATIVE_ALARM_TRUE@
+CONFIG_RTAI_OPT_NATIVE_COND_FALSE = @CONFIG_RTAI_OPT_NATIVE_COND_FALSE@
+CONFIG_RTAI_OPT_NATIVE_COND_TRUE = @CONFIG_RTAI_OPT_NATIVE_COND_TRUE@
+CONFIG_RTAI_OPT_NATIVE_EVENT_FALSE = @CONFIG_RTAI_OPT_NATIVE_EVENT_FALSE@
+CONFIG_RTAI_OPT_NATIVE_EVENT_TRUE = @CONFIG_RTAI_OPT_NATIVE_EVENT_TRUE@
+CONFIG_RTAI_OPT_NATIVE_HEAP_FALSE = @CONFIG_RTAI_OPT_NATIVE_HEAP_FALSE@
+CONFIG_RTAI_OPT_NATIVE_HEAP_TRUE = @CONFIG_RTAI_OPT_NATIVE_HEAP_TRUE@
+CONFIG_RTAI_OPT_NATIVE_INTR_FALSE = @CONFIG_RTAI_OPT_NATIVE_INTR_FALSE@
+CONFIG_RTAI_OPT_NATIVE_INTR_TRUE = @CONFIG_RTAI_OPT_NATIVE_INTR_TRUE@
+CONFIG_RTAI_OPT_NATIVE_MUTEX_FALSE = @CONFIG_RTAI_OPT_NATIVE_MUTEX_FALSE@
+CONFIG_RTAI_OPT_NATIVE_MUTEX_TRUE = @CONFIG_RTAI_OPT_NATIVE_MUTEX_TRUE@
+CONFIG_RTAI_OPT_NATIVE_PIPE_FALSE = @CONFIG_RTAI_OPT_NATIVE_PIPE_FALSE@
+CONFIG_RTAI_OPT_NATIVE_PIPE_TRUE = @CONFIG_RTAI_OPT_NATIVE_PIPE_TRUE@
+CONFIG_RTAI_OPT_NATIVE_QUEUE_FALSE = @CONFIG_RTAI_OPT_NATIVE_QUEUE_FALSE@
+CONFIG_RTAI_OPT_NATIVE_QUEUE_TRUE = @CONFIG_RTAI_OPT_NATIVE_QUEUE_TRUE@
+CONFIG_RTAI_OPT_NATIVE_REGISTRY_FALSE = @CONFIG_RTAI_OPT_NATIVE_REGISTRY_FALSE@
+CONFIG_RTAI_OPT_NATIVE_REGISTRY_TRUE = @CONFIG_RTAI_OPT_NATIVE_REGISTRY_TRUE@
+CONFIG_RTAI_OPT_NATIVE_SEM_FALSE = @CONFIG_RTAI_OPT_NATIVE_SEM_FALSE@
+CONFIG_RTAI_OPT_NATIVE_SEM_TRUE = @CONFIG_RTAI_OPT_NATIVE_SEM_TRUE@
+CONFIG_RTAI_OPT_PIPE_FALSE = @CONFIG_RTAI_OPT_PIPE_FALSE@
+CONFIG_RTAI_OPT_PIPE_TRUE = @CONFIG_RTAI_OPT_PIPE_TRUE@
+CONFIG_RTAI_OPT_UVM_FALSE = @CONFIG_RTAI_OPT_UVM_FALSE@
+CONFIG_RTAI_OPT_UVM_TRUE = @CONFIG_RTAI_OPT_UVM_TRUE@
+CONFIG_RTAI_SKIN_NATIVE_FALSE = @CONFIG_RTAI_SKIN_NATIVE_FALSE@
+CONFIG_RTAI_SKIN_NATIVE_TRUE = @CONFIG_RTAI_SKIN_NATIVE_TRUE@
+CONFIG_RTAI_SKIN_POSIX_FALSE = @CONFIG_RTAI_SKIN_POSIX_FALSE@
+CONFIG_RTAI_SKIN_POSIX_TRUE = @CONFIG_RTAI_SKIN_POSIX_TRUE@
+CONFIG_RTAI_SKIN_PSOS_FALSE = @CONFIG_RTAI_SKIN_PSOS_FALSE@
+CONFIG_RTAI_SKIN_PSOS_TRUE = @CONFIG_RTAI_SKIN_PSOS_TRUE@
+CONFIG_RTAI_SKIN_UITRON_FALSE = @CONFIG_RTAI_SKIN_UITRON_FALSE@
+CONFIG_RTAI_SKIN_UITRON_TRUE = @CONFIG_RTAI_SKIN_UITRON_TRUE@
+CONFIG_RTAI_SKIN_VRTX_FALSE = @CONFIG_RTAI_SKIN_VRTX_FALSE@
+CONFIG_RTAI_SKIN_VRTX_TRUE = @CONFIG_RTAI_SKIN_VRTX_TRUE@
+CONFIG_RTAI_SKIN_VXWORKS_FALSE = @CONFIG_RTAI_SKIN_VXWORKS_FALSE@
+CONFIG_RTAI_SKIN_VXWORKS_TRUE = @CONFIG_RTAI_SKIN_VXWORKS_TRUE@
+CONFIG_SMP_FALSE = @CONFIG_SMP_FALSE@
+CONFIG_SMP_TRUE = @CONFIG_SMP_TRUE@
+CONFIG_X86_FALSE = @CONFIG_X86_FALSE@
+CONFIG_X86_LOCAL_APIC_FALSE = @CONFIG_X86_LOCAL_APIC_FALSE@
+CONFIG_X86_LOCAL_APIC_TRUE = @CONFIG_X86_LOCAL_APIC_TRUE@
+CONFIG_X86_TRUE = @CONFIG_X86_TRUE@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CROSS_COMPILE = @CROSS_COMPILE@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DBX_ABS_SRCDIR_FALSE = @DBX_ABS_SRCDIR_FALSE@
+DBX_ABS_SRCDIR_TRUE = @DBX_ABS_SRCDIR_TRUE@
+DBX_DOC_FALSE = @DBX_DOC_FALSE@
+DBX_DOC_ROOT = @DBX_DOC_ROOT@
+DBX_DOC_TRUE = @DBX_DOC_TRUE@
+DBX_FOP = @DBX_FOP@
+DBX_GEN_DOC_ROOT = @DBX_GEN_DOC_ROOT@
+DBX_LINT = @DBX_LINT@
+DBX_MAYBE_NONET = @DBX_MAYBE_NONET@
+DBX_ROOT = @DBX_ROOT@
+DBX_XSLTPROC = @DBX_XSLTPROC@
+DBX_XSL_ROOT = @DBX_XSL_ROOT@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DOXYGEN = @DOXYGEN@
+DOXYGEN_HAVE_DOT = @DOXYGEN_HAVE_DOT@
+DOXYGEN_SHOW_INCLUDE_FILES = @DOXYGEN_SHOW_INCLUDE_FILES@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FFLAGS = @FFLAGS@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LATEX_BATCHMODE = @LATEX_BATCHMODE@
+LATEX_MODE = @LATEX_MODE@
+LDFLAGS = @LDFLAGS@
+LEX = @LEX@
+LEXLIB = @LEXLIB@
+LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
+MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
+MAKEINFO = @MAKEINFO@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RTAI_BUILD_STRING = @RTAI_BUILD_STRING@
+RTAI_FP_CFLAGS = @RTAI_FP_CFLAGS@
+RTAI_HOST_STRING = @RTAI_HOST_STRING@
+RTAI_KBUILD_BOTTOM = @RTAI_KBUILD_BOTTOM@
+RTAI_KBUILD_CLEAN = @RTAI_KBUILD_CLEAN@
+RTAI_KBUILD_CMD = @RTAI_KBUILD_CMD@
+RTAI_KBUILD_DISTCLEAN = @RTAI_KBUILD_DISTCLEAN@
+RTAI_KBUILD_ENV = @RTAI_KBUILD_ENV@
+RTAI_KBUILD_TOP = @RTAI_KBUILD_TOP@
+RTAI_KMOD_APP_CFLAGS = @RTAI_KMOD_APP_CFLAGS@
+RTAI_KMOD_CFLAGS = @RTAI_KMOD_CFLAGS@
+RTAI_LINUX_DIR = @RTAI_LINUX_DIR@
+RTAI_LINUX_VERSION = @RTAI_LINUX_VERSION@
+RTAI_MAYBE_DOCDIR = @RTAI_MAYBE_DOCDIR@
+RTAI_MAYBE_SIMDIR = @RTAI_MAYBE_SIMDIR@
+RTAI_MODULE_DIR = @RTAI_MODULE_DIR@
+RTAI_MODULE_EXT = @RTAI_MODULE_EXT@
+RTAI_PIPE_NRDEV = @RTAI_PIPE_NRDEV@
+RTAI_TARGET_ARCH = @RTAI_TARGET_ARCH@
+RTAI_TARGET_SUBARCH = @RTAI_TARGET_SUBARCH@
+RTAI_USER_APP_CFLAGS = @RTAI_USER_APP_CFLAGS@
+RTAI_USER_CFLAGS = @RTAI_USER_CFLAGS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+ac_ct_RANLIB = @ac_ct_RANLIB@
+ac_ct_STRIP = @ac_ct_STRIP@
+am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
+am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
+am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
+am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+datadir = @datadir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+subdirs = @subdirs@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+moduledir = $(DESTDIR)@RTAI_MODULE_DIR@
+modext = @RTAI_MODULE_EXT@
+libhal_SRC = hal.c switch.S $(am__append_1)
+EXTRA_DIST = $(libhal_SRC) fpu.S Makefile.kbuild
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/GNUmakefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/GNUmakefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign arch/ppc64/hal/GNUmakefile'; \
+ cd $(top_srcdir) && \
+ $(AUTOMAKE) --foreign arch/ppc64/hal/GNUmakefile
+.PRECIOUS: GNUmakefile
+GNUmakefile: $(srcdir)/GNUmakefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+distclean-libtool:
+ -rm -f libtool
+uninstall-info-am:
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
+ list='$(DISTFILES)'; for file in $$list; do \
+ case $$file in \
+ $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
+ $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
+ esac; \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test "$$dir" != "$$file" && test "$$dir" != "."; then \
+ dir="/$$dir"; \
+ $(mkdir_p) "$(distdir)$$dir"; \
+ else \
+ dir=''; \
+ fi; \
+ if test -d $$d/$$file; then \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+ fi; \
+ cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+ else \
+ test -f $(distdir)/$$file \
+ || cp -p $$d/$$file $(distdir)/$$file \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: GNUmakefile all-local
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-local mostlyclean-am
+
+distclean: distclean-am
+ -rm -f GNUmakefile
+distclean-am: clean-am distclean-generic distclean-libtool
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-exec-am: install-exec-local
+
+install-info: install-info-am
+
+install-man:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f GNUmakefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-info-am
+
+.PHONY: all all-am all-local check check-am clean clean-generic \
+ clean-libtool clean-local distclean distclean-generic \
+ distclean-libtool distdir dvi dvi-am html html-am info info-am \
+ install install-am install-data install-data-am install-exec \
+ install-exec-am install-exec-local install-info \
+ install-info-am install-man install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \
+ uninstall-info-am
+
+
+rtai_hal.ko: @RTAI_KBUILD_ENV@
+rtai_hal.ko: $(libhal_SRC) FORCE
+ @RTAI_KBUILD_TOP@ \
+ @RTAI_KBUILD_CMD@ rtai_extradef="@RTAI_KMOD_CFLAGS@" \
+ @RTAI_KBUILD_BOTTOM@
+
+clean-local:
+ @RTAI_KBUILD_CLEAN@
+
+all-local: rtai_hal$(modext)
+@CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE@ $(mkinstalldirs) $(top_srcdir)/modules
+@CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE@ $(INSTALL_DATA) $^ $(top_srcdir)/modules
+
+install-exec-local: rtai_hal$(modext)
+ $(mkinstalldirs) $(moduledir)
+ $(INSTALL_DATA) $< $(moduledir)
+
+.PHONY: FORCE
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff -Nru fusion-0.7.1/arch/ppc64/hal/Makefile.kbuild fusion-0.7.1-ppc64-devel/arch/ppc64/hal/Makefile.kbuild
--- fusion-0.7.1/arch/ppc64/hal/Makefile.kbuild 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/hal/Makefile.kbuild 2004-12-24 11:12:11.000000000 +0200
@@ -0,0 +1,13 @@
+EXTRA_CFLAGS += -I$(rtai_srctree)/include \
+ -I$(src)/../../../include \
+ -I$(src)/../../.. \
+ $(rtai_extradef)
+
+EXTRA_AFLAGS += -I$(rtai_srctree)/include \
+ -I$(src)/../../../include \
+ -I$(src)/../../.. \
+ $(rtai_extradef)
+
+obj-m += rtai_hal.o
+
+rtai_hal-objs := $(rtai_objs)
diff -Nru fusion-0.7.1/arch/ppc64/hal/fpu.S fusion-0.7.1-ppc64-devel/arch/ppc64/hal/fpu.S
--- fusion-0.7.1/arch/ppc64/hal/fpu.S 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/hal/fpu.S 2005-06-01 16:18:47.000000000 +0300
@@ -0,0 +1,74 @@
+/*
+ * arch/ppc64/hal/fpu.S
+ *
+ * Fusion 64-bit PowerPC adoption
+ * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ * based on previous work:
+ *
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <rtai_config.h>
+
+#define RTHAL_FPSAVE(n, base) stfd n,8*(n)(base)
+#define RTHAL_FPSAVE2(n, base) RTHAL_FPSAVE(n, base); RTHAL_FPSAVE(n+1, base)
+#define RTHAL_FPSAVE4(n, base) RTHAL_FPSAVE2(n, base); RTHAL_FPSAVE2(n+2, base)
+#define RTHAL_FPSAVE8(n, base) RTHAL_FPSAVE4(n, base); RTHAL_FPSAVE4(n+4, base)
+#define RTHAL_FPSAVE16(n, base) RTHAL_FPSAVE8(n, base); RTHAL_FPSAVE8(n+8, base)
+#define RTHAL_FPSAVE32(n, base) RTHAL_FPSAVE16(n, base); RTHAL_FPSAVE16(n+16, base)
+
+/* r3 = &tcb->fpuenv */
+_GLOBAL(rthal_save_fpu)
+ mfmsr r5
+ ori r5,r5,MSR_FP /* Re-enable use of FPU. */
+ mtmsrd r5 /* Enable use of fpu. */
+ isync
+ RTHAL_FPSAVE32(0,r3)
+ mffs fr0
+ stfd fr0,8*32(r3)
+ blr
+
+#define RTHAL_FPLOAD(n, base) lfd n,8*(n)(base)
+#define RTHAL_FPLOAD2(n, base) RTHAL_FPLOAD(n, base); RTHAL_FPLOAD(n+1, base)
+#define RTHAL_FPLOAD4(n, base) RTHAL_FPLOAD2(n, base); RTHAL_FPLOAD2(n+2, base)
+#define RTHAL_FPLOAD8(n, base) RTHAL_FPLOAD4(n, base); RTHAL_FPLOAD4(n+4, base)
+#define RTHAL_FPLOAD16(n, base) RTHAL_FPLOAD8(n, base); RTHAL_FPLOAD8(n+8, base)
+#define RTHAL_FPLOAD32(n, base) RTHAL_FPLOAD16(n, base); RTHAL_FPLOAD16(n+16, base)
+
+/* r3 = &tcb->fpuenv */
+_GLOBAL(rthal_init_fpu)
+ mfmsr r5
+ ori r5,r5,MSR_FP|MSR_FE1 /* RT kernel threads always operate in */
+ li r4,MSR_FE0 /* imprecise non-recoverable exception mode. */
+ andc r5,r5,r4
+ mtmsrd r5
+
+ /* Fallback wanted. */
+
+/* r3 = &tcb->fpuenv */
+_GLOBAL(rthal_restore_fpu)
+ mfmsr r5
+ ori r5,r5,MSR_FP /* Re-enable use of FPU. */
+ mtmsrd r5 /* Enable use of fpu. */
+ isync
+ lfd fr0,8*32(r3)
+ mtfsf 0xff,0
+ RTHAL_FPLOAD32(0,r3)
+ blr
diff -Nru fusion-0.7.1/arch/ppc64/hal/hal.c fusion-0.7.1-ppc64-devel/arch/ppc64/hal/hal.c
--- fusion-0.7.1/arch/ppc64/hal/hal.c 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/hal/hal.c 2005-05-23 15:18:59.000000000 +0300
@@ -0,0 +1,956 @@
+/**
+ * @ingroup hal_ppc64
+ * @file
+ *
+ * Adeos-based Real-Time Abstraction Layer for PPC64.
+ *
+ * Original RTAI/x86 layer implementation: \n
+ * Copyright © 2000 Paolo Mantegazza, \n
+ * Copyright © 2000 Steve Papacharalambous, \n
+ * Copyright © 2000 Stuart Hughes, \n
+ * and others.
+ *
+ * RTAI/x86 rewrite over Adeos: \n
+ * Copyright © 2002 Philippe Gerum.
+ *
+ * PPC64 changes: \n
+ * Copyright © 2005 Heikki Lindholm.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/**
+ * @defgroup hal_ppc64 HAL/ppc64.
+ *
+ * Basic PowerPC-dependent services used by the real-time nucleus.
+ *
+ *@{*/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/console.h>
+#include <linux/kallsyms.h>
+#include <asm/system.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <nucleus/asm/hal.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif /* CONFIG_PROC_FS */
+#include <stdarg.h>
+
+MODULE_LICENSE("GPL");
+
+static unsigned long rthal_cpufreq_arg;
+module_param_named(cpufreq,rthal_cpufreq_arg,ulong,0444);
+
+static unsigned long rthal_timerfreq_arg;
+module_param_named(timerfreq,rthal_timerfreq_arg,ulong,0444);
+
+adomain_t rthal_domain;
+
+static struct {
+
+ void (*handler)(unsigned irq, void *cookie);
+ void *cookie;
+ unsigned long hits[RTHAL_NR_CPUS];
+
+} rthal_realtime_irq[IPIPE_NR_IRQS];
+
+static struct {
+
+ unsigned long flags;
+ int count;
+
+} rthal_linux_irq[IPIPE_NR_XIRQS];
+
+static struct {
+
+ void (*handler)(void);
+ unsigned label;
+
+} rthal_sysreq_table[RTHAL_NR_SRQS];
+
+static int rthal_realtime_faults[RTHAL_NR_CPUS][ADEOS_NR_FAULTS];
+
+static int rthal_init_done;
+
+static unsigned rthal_sysreq_virq;
+
+static unsigned long rthal_sysreq_map = 1; /* #0 is invalid. */
+
+static unsigned long rthal_sysreq_pending;
+
+static unsigned long rthal_sysreq_running;
+
+static spinlock_t rthal_ssrq_lock = SPIN_LOCK_UNLOCKED;
+
+static volatile int rthal_sync_op;
+
+static atomic_t rthal_sync_count = ATOMIC_INIT(1);
+
+static rthal_trap_handler_t rthal_trap_handler;
+
+static int rthal_periodic_p;
+
+struct rthal_calibration_data rthal_tunables;
+
+volatile unsigned long rthal_cpu_realtime;
+
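+/*
+ * Hypothetical usage sketch (my_tick_handler is an illustrative name,
+ * not part of this patch): a nonzero nstick selects periodic mode,
+ * zero selects oneshot mode.
+ *
+ *     rthal_request_timer(&my_tick_handler, nstick);
+ *     ...
+ *     rthal_release_timer();
+ */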
+int rthal_request_timer (void (*handler)(void),
+ unsigned long nstick)
+{
+ unsigned long flags;
+ int err;
+
+ flags = rthal_critical_enter(NULL);
+
+ if (nstick > 0)
+ {
+ /* Periodic setup --
+ Use the built-in Adeos service directly. */
+ err = adeos_tune_timer(nstick,0);
+ rthal_periodic_p = 1;
+ }
+ else
+ {
+ /* Oneshot setup. */
+ /*disarm_decr[adeos_processor_id()] = 1;*/
+ rthal_periodic_p = 0;
+ rthal_set_timer_shot(tb_ticks_per_jiffy);
+ }
+
+ rthal_release_irq(RTHAL_TIMER_IRQ);
+
+ err = rthal_request_irq(RTHAL_TIMER_IRQ,
+ (rthal_irq_handler_t)handler,
+ NULL);
+
+ rthal_critical_exit(flags);
+
+ return err;
+}
+
+void rthal_release_timer (void)
+
+{
+ unsigned long flags;
+
+ flags = rthal_critical_enter(NULL);
+
+ if (rthal_periodic_p)
+ adeos_tune_timer(0,ADEOS_RESET_TIMER);
+ else
+ {
+ /*disarm_decr[adeos_processor_id()] = 0;*/
+ set_dec(tb_ticks_per_jiffy);
+ }
+
+ rthal_release_irq(RTHAL_TIMER_IRQ);
+
+ rthal_critical_exit(flags);
+}
+
+unsigned long rthal_calibrate_timer (void)
+
+{
+ /* On PowerPC systems, the cost of setting the decrementer or the
+ PIT does not induce significant latency. In such a case, let's
+ return the shortest possible delay for a one-shot setup. In any
+ case, always return a non-zero value. e.g. 1 decrementer tick
+ here. */
+ return 1000000000 / RTHAL_CPU_FREQ;
+}
+
+unsigned long rthal_critical_enter (void (*synch)(void))
+
+{
+ unsigned long flags = adeos_critical_enter(synch);
+
+ if (atomic_dec_and_test(&rthal_sync_count))
+ rthal_sync_op = 0;
+ else if (synch != NULL)
+ printk(KERN_WARNING "RTAI: Nested critical sync will fail.\n");
+
+ return flags;
+}
+
+void rthal_critical_exit (unsigned long flags)
+
+{
+ atomic_inc(&rthal_sync_count);
+ adeos_critical_exit(flags);
+}
+
+static void rthal_irq_trampoline (unsigned irq)
+
+{
+ rthal_realtime_irq[irq].hits[adeos_processor_id()]++;
+ rthal_realtime_irq[irq].handler(irq,rthal_realtime_irq[irq].cookie);
+}
+
+int rthal_request_irq (unsigned irq,
+ void (*handler)(unsigned irq, void *cookie),
+ void *cookie)
+{
+ unsigned long flags;
+ int err = 0;
+
+ if (handler == NULL || irq >= IPIPE_NR_IRQS)
+ return -EINVAL;
+
+ flags = rthal_critical_enter(NULL);
+
+ if (rthal_realtime_irq[irq].handler != NULL)
+ {
+ err = -EBUSY;
+ goto unlock_and_exit;
+ }
+
+ err = adeos_virtualize_irq_from(&rthal_domain,
+ irq,
+ &rthal_irq_trampoline,
+ NULL,
+ IPIPE_DYNAMIC_MASK);
+ if (!err)
+ {
+ rthal_realtime_irq[irq].handler = handler;
+ rthal_realtime_irq[irq].cookie = cookie;
+ }
+
+ unlock_and_exit:
+
+ rthal_critical_exit(flags);
+
+ return err;
+}
+
+int rthal_release_irq (unsigned irq)
+
+{
+ int err = 0;
+
+ if (irq >= IPIPE_NR_IRQS)
+ return -EINVAL;
+
+ err = adeos_virtualize_irq_from(&rthal_domain,
+ irq,
+ NULL,
+ NULL,
+ IPIPE_PASS_MASK);
+
+ if (!err)
+ xchg(&rthal_realtime_irq[irq].handler,NULL);
+
+ return err;
+}
+
+/**
+ * Enable an IRQ source.
+ *
+ */
+int rthal_enable_irq (unsigned irq)
+
+{
+ if (irq >= IPIPE_NR_XIRQS)
+ return -EINVAL;
+
+ if (irq_desc[irq].handler == NULL ||
+ irq_desc[irq].handler->enable == NULL)
+ return -ENODEV;
+
+ irq_desc[irq].handler->enable(irq);
+
+ return 0;
+}
+
+/**
+ * Disable an IRQ source.
+ *
+ */
+int rthal_disable_irq (unsigned irq)
+
+{
+ if (irq >= IPIPE_NR_XIRQS)
+ return -EINVAL;
+
+ if (irq_desc[irq].handler == NULL ||
+ irq_desc[irq].handler->disable == NULL)
+ return -ENODEV;
+
+ irq_desc[irq].handler->disable(irq);
+
+ return 0;
+}
+
+/**
+ * Install a shared Linux interrupt handler.
+ *
+ * rthal_request_linux_irq installs function @a handler as a standard
+ * Linux interrupt service routine for IRQ level @a irq forcing Linux
+ * to share the IRQ with other interrupt handlers. The handler is
+ * appended to any already existing Linux handler for the same irq and
+ * is run by Linux irq as any of its handler. In this way a real time
+ * application can monitor Linux interrupts handling at its will. The
+ * handler appears in /proc/interrupts.
+ *
+ * @param irq is the IRQ level to which the handler will be associated.
+ *
+ * @param handler pointer on the interrupt service routine to be installed.
+ *
+ * @param name is a name for /proc/interrupts.
+ *
+ * @param dev_id is to pass to the interrupt handler, in the same way as the
+ * standard Linux irq request call.
+ *
+ * The interrupt service routine can be uninstalled using
+ * rthal_release_linux_irq().
+ *
+ * @retval 0 on success.
+ * @retval -EINVAL if @a irq is not a valid external IRQ number or handler
+ * is @c NULL.
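+ *
+ * A hypothetical usage sketch (my_isr and my_dev are illustrative
+ * names, not part of this patch):
+ * @code
+ * static irqreturn_t my_isr (int irq, void *dev_id, struct pt_regs *regs)
+ * {
+ *     return IRQ_HANDLED;
+ * }
+ *
+ * rthal_request_linux_irq(irq,&my_isr,"my_dev",&my_dev);
+ * @endcode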
+ */
+int rthal_request_linux_irq (unsigned irq,
+ irqreturn_t (*handler)(int irq,
+ void *dev_id,
+ struct pt_regs *regs),
+ char *name,
+ void *dev_id)
+{
+ unsigned long flags;
+ int err = 0;
+
+ if (irq >= IPIPE_NR_XIRQS || !handler)
+ return -EINVAL;
+
+ flags = rthal_spin_lock_irqsave(&irq_desc[irq].lock);
+
+ if (rthal_linux_irq[irq].count++ == 0 && irq_desc[irq].action)
+ {
+ rthal_linux_irq[irq].flags = irq_desc[irq].action->flags;
+ irq_desc[irq].action->flags |= SA_SHIRQ;
+ }
+
+ rthal_spin_unlock_irqrestore(flags,&irq_desc[irq].lock);
+
+ err = request_irq(irq,handler,SA_SHIRQ,name,dev_id);
+
+ return err;
+}
+
+/**
+ * Uninstall shared Linux interrupt handler.
+ *
+ * @param dev_id is to pass to the interrupt handler, in the same way as the
+ * standard Linux irq request call.
+ *
+ * @param irq is the IRQ level of the interrupt handler to be freed.
+ *
+ * @retval 0 on success.
+ * @retval -EINVAL if @a irq is not a valid external IRQ number.
+ */
+int rthal_release_linux_irq (unsigned irq, void *dev_id)
+
+{
+ unsigned long flags;
+
+ if (irq >= IPIPE_NR_XIRQS || rthal_linux_irq[irq].count == 0)
+ return -EINVAL;
+
+ free_irq(irq,dev_id);
+
+ flags = rthal_spin_lock_irqsave(&irq_desc[irq].lock);
+
+ if (--rthal_linux_irq[irq].count == 0 && irq_desc[irq].action)
+ irq_desc[irq].action->flags = rthal_linux_irq[irq].flags;
+
+ rthal_spin_unlock_irqrestore(flags,&irq_desc[irq].lock);
+
+ return 0;
+}
+
+/**
+ * Pend an IRQ to Linux.
+ *
+ * rthal_pend_linux_irq appends a Linux interrupt irq for processing in Linux IRQ
+ * mode, i.e. with hardware interrupts fully enabled.
+ */
+int rthal_pend_linux_irq (unsigned irq)
+
+{
+ return adeos_propagate_irq(irq);
+}
+
+/**
+ * Install a system request handler.
+ *
+ * rthal_request_srq installs an RTAI system request (srq) by assigning
+ * @a handler, the function to be called in kernel space following its
+ * activation by a call to rthal_pend_linux_srq(). @a handler is in
+ * practice used to request a service from the kernel. Since Linux
+ * services cannot be requested safely from the real-time domain, you
+ * can set up a handler that receives real-time requests and safely
+ * executes them when Linux is running.
+ *
+ * @return the number of the assigned system request on success.
+ * @retval -EINVAL if @a handler is @c NULL.
+ * @retval -EBUSY if no free srq slot is available.
+ */
+int rthal_request_srq (unsigned label,
+ void (*handler)(void))
+{
+ unsigned long flags;
+ int srq;
+
+ if (handler == NULL)
+ return -EINVAL;
+
+ flags = rthal_spin_lock_irqsave(&rthal_ssrq_lock);
+
+ if (rthal_sysreq_map != ~0)
+ {
+ srq = ffz(rthal_sysreq_map);
+ set_bit(srq,&rthal_sysreq_map);
+ rthal_sysreq_table[srq].handler = handler;
+ rthal_sysreq_table[srq].label = label;
+ }
+ else
+ srq = -EBUSY;
+
+ rthal_spin_unlock_irqrestore(flags,&rthal_ssrq_lock);
+
+ return srq;
+}
+
+/**
+ * Uninstall a system request handler.
+ *
+ * rthal_release_srq uninstalls the system request @a srq, as returned
+ * by a previous call to rthal_request_srq().
+ *
+ * @retval -EINVAL if @a srq is invalid.
+ */
+int rthal_release_srq (unsigned srq)
+
+{
+ if (srq < 1 ||
+ srq >= RTHAL_NR_SRQS ||
+ !test_and_clear_bit(srq,&rthal_sysreq_map))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * Pend a system request to Linux.
+ *
+ * rthal_pend_linux_srq marks the system request @a srq as pending for
+ * service by the Linux kernel, which runs its handler once it resumes.
+ *
+ * @param srq is the value returned by rthal_request_srq().
+ *
+ * @retval 1 if the request was pended, 0 if it was already pending.
+ * @retval -EINVAL if @a srq is invalid.
+ */
+int rthal_pend_linux_srq (unsigned srq)
+
+{
+ if (srq > 0 && srq < RTHAL_NR_SRQS)
+ {
+ if (!test_and_set_bit(srq,&rthal_sysreq_pending))
+ {
+ adeos_schedule_irq(rthal_sysreq_virq);
+ return 1;
+ }
+
+ return 0; /* Already pending. */
+ }
+
+ return -EINVAL;
+}
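
The srq mechanism above can be used like this (sketch only; the handler
body and the label value are made up):

static int my_srq; /* slot returned by rthal_request_srq() */

static void my_srq_handler(void)
{
    /* Runs in the Linux domain, hardware interrupts enabled. */
    printk(KERN_INFO "srq served by Linux\n");
}

static int __init srq_example_init(void)
{
    my_srq = rthal_request_srq(0xcafe /* assumed label */, &my_srq_handler);
    return my_srq < 0 ? my_srq : 0;
}

static void __exit srq_example_exit(void)
{
    rthal_release_srq(my_srq);
}

/* From a real-time context, defer work to Linux with
 * rthal_pend_linux_srq(my_srq); the handler runs once the Linux
 * domain resumes. */
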
+
+#ifdef CONFIG_SMP
+
+int rthal_set_irq_affinity (unsigned irq, cpumask_t cpumask, cpumask_t *oldmask)
+
+{
+ cpumask_t _oldmask;
+
+ if (irq >= IPIPE_NR_XIRQS)
+ return -EINVAL;
+
+ _oldmask = adeos_set_irq_affinity(irq,cpumask);
+
+ if (oldmask)
+ *oldmask = _oldmask;
+
+ return cpus_empty(_oldmask) ? -EINVAL : 0;
+}
+
+#else /* !CONFIG_SMP */
+
+int rthal_set_irq_affinity (unsigned irq, cpumask_t cpumask, cpumask_t *oldmask) {
+
+ return 0;
+}
+
+#endif /* CONFIG_SMP */
+
+rthal_trap_handler_t rthal_set_trap_handler (rthal_trap_handler_t handler) {
+
+ return (rthal_trap_handler_t)xchg(&rthal_trap_handler,handler);
+}
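
A sketch of hooking the fault path with rthal_set_trap_handler()
(the handler body and wrapper names are illustrative only; returning 0
lets rthal_trap_fault() below propagate the event to Linux):

static int my_trap_handler(adevinfo_t *evinfo)
{
    /* Inspect the fault here; nonzero means "fully handled", 0 asks
       for normal propagation to Linux. */
    return 0;
}

static rthal_trap_handler_t old_handler;

static void hook_traps(void)
{
    old_handler = rthal_set_trap_handler(&my_trap_handler);
}

static void unhook_traps(void)
{
    rthal_set_trap_handler(old_handler);
}
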
+
+static void rthal_trap_fault (adevinfo_t *evinfo)
+
+{
+ adeos_declare_cpuid;
+
+ adeos_load_cpuid();
+
+ if (evinfo->domid == RTHAL_DOMAIN_ID)
+ {
+ rthal_realtime_faults[cpuid][evinfo->event]++;
+
+ if (rthal_trap_handler != NULL &&
+ test_bit(cpuid,&rthal_cpu_realtime) &&
+ rthal_trap_handler(evinfo) != 0)
+ return;
+ }
+
+ adeos_propagate_event(evinfo);
+}
+
+static void rthal_ssrq_trampoline (unsigned virq)
+
+{
+ unsigned long pending;
+
+ rthal_spin_lock(&rthal_ssrq_lock);
+
+ while ((pending = rthal_sysreq_pending & ~rthal_sysreq_running) != 0)
+ {
+ unsigned srq = ffnz(pending);
+ set_bit(srq,&rthal_sysreq_running);
+ clear_bit(srq,&rthal_sysreq_pending);
+ rthal_spin_unlock(&rthal_ssrq_lock);
+
+ if (test_bit(srq,&rthal_sysreq_map))
+ rthal_sysreq_table[srq].handler();
+
+ clear_bit(srq,&rthal_sysreq_running);
+ rthal_spin_lock(&rthal_ssrq_lock);
+ }
+
+ rthal_spin_unlock(&rthal_ssrq_lock);
+}
+
+static void rthal_domain_entry (int iflag)
+
+{
+ unsigned trapnr;
+
+#if !defined(CONFIG_ADEOS_NOTHREADS)
+ if (!iflag)
+ goto spin;
+#endif /* !CONFIG_ADEOS_NOTHREADS */
+
+ /* Trap all faults. */
+ for (trapnr = 0; trapnr < ADEOS_NR_FAULTS; trapnr++)
+ adeos_catch_event(trapnr,&rthal_trap_fault);
+
+ printk(KERN_INFO "RTAI: hal/ppc64 loaded.\n");
+
+#if !defined(CONFIG_ADEOS_NOTHREADS)
+ spin:
+
+ for (;;)
+ adeos_suspend_domain();
+#endif /* !CONFIG_ADEOS_NOTHREADS */
+}
+
+#ifdef CONFIG_PROC_FS
+
+struct proc_dir_entry *rthal_proc_root;
+
+static int hal_read_proc (char *page,
+                          char **start,
+                          off_t off,
+                          int count,
+                          int *eof,
+                          void *data)
+{
+ int len, major, minor, patchlevel;
+
+ /* Canonicalize the Adeos relno-candidate information to some
+ major.minor.patchlevel format to be parser-friendly. */
+
+ major = ADEOS_MAJOR_NUMBER;
+
+ if (ADEOS_MINOR_NUMBER < 255)
+ {
+ --major;
+ minor = 99;
+ patchlevel = ADEOS_MINOR_NUMBER;
+ }
+ else
+ {
+ minor = 0;
+ patchlevel = 0;
+ }
+
+ len = sprintf(page,"%d.%d.%d\n",major,minor,patchlevel);
+ len -= off;
+ if (len <= off + count) *eof = 1;
+ *start = page + off;
+ if(len > count) len = count;
+ if(len < 0) len = 0;
+
+ return len;
+}
+
+static int compiler_read_proc (char *page,
+                               char **start,
+                               off_t off,
+                               int count,
+                               int *eof,
+                               void *data)
+{
+ int len;
+
+ len = sprintf(page,"%s\n",CONFIG_RTAI_COMPILER);
+ len -= off;
+ if (len <= off + count) *eof = 1;
+ *start = page + off;
+ if(len > count) len = count;
+ if(len < 0) len = 0;
+
+ return len;
+}
+
+static int irq_read_proc (char *page,
+ char **start,
+ off_t off,
+ int count,
+ int *eof,
+ void *data)
+{
+ int len = 0, cpuid, irq;
+ char *p = page;
+
+ p += sprintf(p,"IRQ ");
+
+ for (cpuid = 0; cpuid < num_online_cpus(); cpuid++)
+ p += sprintf(p," CPU%d",cpuid);
+
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
+ {
+ if (rthal_realtime_irq[irq].handler == NULL)
+ continue;
+
+ p += sprintf(p,"\n%3d:",irq);
+
+ for (cpuid = 0; cpuid < num_online_cpus(); cpuid++)
+ p += sprintf(p,"%12lu",rthal_realtime_irq[irq].hits[cpuid]);
+ }
+
+ p += sprintf(p,"\n");
+
+ len = p - page - off;
+ if (len <= off + count) *eof = 1;
+ *start = page + off;
+ if (len > count) len = count;
+ if (len < 0) len = 0;
+
+ return len;
+}
+
+static int faults_read_proc (char *page,
+ char **start,
+ off_t off,
+ int count,
+ int *eof,
+ void *data)
+{
+ static char *fault_labels[] = {
+ [0] = "Data or instruction access",
+ [1] = "Alignment",
+ [2] = "Altivec unavailable",
+ [3] = "Program check exception",
+ [4] = "Machine check exception",
+ [5] = "Unknown",
+ [6] = "Instruction breakpoint",
+ [7] = "Run mode exception",
+ [8] = "Single-step exception",
+ [9] = "Non-recoverable exception",
+ [10] = "Software emulation",
+ [11] = "Debug",
+ [12] = "SPE",
+ [13] = "Altivec assist"
+ };
+ int len = 0, cpuid, trap;
+ char *p = page;
+
+ p += sprintf(p,"TRAP ");
+
+ for (cpuid = 0; cpuid < num_online_cpus(); cpuid++)
+ p += sprintf(p," CPU%d",cpuid);
+
+ for (trap = 0; trap < 14; trap++)
+ {
+ p += sprintf(p,"\n%3d: ",trap);
+
+ for (cpuid = 0; cpuid < num_online_cpus(); cpuid++)
+ p += sprintf(p,"%12d",
+ rthal_realtime_faults[cpuid][trap]);
+
+ p += sprintf(p," (%s)",fault_labels[trap]);
+ }
+
+ p += sprintf(p,"\n");
+
+ len = p - page - off;
+ if (len <= off + count) *eof = 1;
+ *start = page + off;
+ if (len > count) len = count;
+ if (len < 0) len = 0;
+
+ return len;
+}
+
+static struct proc_dir_entry *add_proc_leaf (const char *name,
+ read_proc_t rdproc,
+ write_proc_t wrproc,
+ void *data,
+ struct proc_dir_entry *parent)
+{
+ int mode = wrproc ? 0644 : 0444;
+ struct proc_dir_entry *entry;
+
+ entry = create_proc_entry(name,mode,parent);
+
+ if (entry)
+ {
+ entry->nlink = 1;
+ entry->data = data;
+ entry->read_proc = rdproc;
+ entry->write_proc = wrproc;
+ entry->owner = THIS_MODULE;
+ }
+
+ return entry;
+}
+
+static int rthal_proc_register (void)
+
+{
+ rthal_proc_root = create_proc_entry("rtai",S_IFDIR, 0);
+
+ if (!rthal_proc_root)
+ {
+ printk(KERN_ERR "RTAI: Unable to initialize /proc/rtai.\n");
+ return -1;
+ }
+
+ rthal_proc_root->owner = THIS_MODULE;
+
+ add_proc_leaf("hal",
+ &hal_read_proc,
+ NULL,
+ NULL,
+ rthal_proc_root);
+
+ add_proc_leaf("compiler",
+ &compiler_read_proc,
+ NULL,
+ NULL,
+ rthal_proc_root);
+
+ add_proc_leaf("irq",
+ &irq_read_proc,
+ NULL,
+ NULL,
+ rthal_proc_root);
+
+ add_proc_leaf("faults",
+ &faults_read_proc,
+ NULL,
+ NULL,
+ rthal_proc_root);
+ return 0;
+}
+
+static void rthal_proc_unregister (void)
+
+{
+ remove_proc_entry("hal",rthal_proc_root);
+ remove_proc_entry("compiler",rthal_proc_root);
+ remove_proc_entry("irq",rthal_proc_root);
+ remove_proc_entry("faults",rthal_proc_root);
+ remove_proc_entry("rtai",NULL);
+}
+
+#endif /* CONFIG_PROC_FS */
+
+int __rthal_init (void)
+
+{
+ adattr_t attr;
+ int err;
+
+#ifdef CONFIG_SMP
+ /* The nucleus also sets the same CPU affinity so that both
+ modules keep their execution sequence on SMP boxen. */
+ set_cpus_allowed(current,cpumask_of_cpu(0));
+#endif /* CONFIG_SMP */
+
+ /* Allocate a virtual interrupt to handle sysreqs within the Linux
+ domain. */
+ rthal_sysreq_virq = adeos_alloc_irq();
+
+ if (!rthal_sysreq_virq)
+ {
+ printk(KERN_ERR "RTAI: No virtual interrupt available.\n");
+ return -EBUSY;
+ }
+
+ err = adeos_virtualize_irq(rthal_sysreq_virq,
+ &rthal_ssrq_trampoline,
+ NULL,
+ IPIPE_HANDLE_MASK);
+ if (err)
+ {
+ printk(KERN_ERR "RTAI: Failed to virtualize IRQ.\n");
+ goto out_free_irq;
+ }
+
+ if (rthal_cpufreq_arg == 0)
+ {
+ adsysinfo_t sysinfo;
+ adeos_get_sysinfo(&sysinfo);
+ /* The CPU frequency is expressed as the timebase frequency
+ for this port. */
+ rthal_cpufreq_arg = (unsigned long)sysinfo.cpufreq;
+ }
+
+ rthal_tunables.cpu_freq = rthal_cpufreq_arg;
+
+ if (rthal_timerfreq_arg == 0)
+ rthal_timerfreq_arg = rthal_tunables.cpu_freq;
+
+ rthal_tunables.timer_freq = rthal_timerfreq_arg;
+
+#ifdef CONFIG_PROC_FS
+ rthal_proc_register();
+#endif /* CONFIG_PROC_FS */
+
+ /* Let Adeos do its magic for our real-time domain. */
+ adeos_init_attr(&attr);
+ attr.name = "RTAI";
+ attr.domid = RTHAL_DOMAIN_ID;
+ attr.entry = &rthal_domain_entry;
+ attr.priority = ADEOS_ROOT_PRI + 100; /* Precede Linux in the pipeline */
+
+ err = adeos_register_domain(&rthal_domain,&attr);
+
+ if (!err)
+ rthal_init_done = 1;
+ else
+ {
+ printk(KERN_ERR "RTAI: Domain registration failed.\n");
+ goto out_proc_unregister;
+ }
+
+ return 0;
+
+out_proc_unregister:
+#ifdef CONFIG_PROC_FS
+ rthal_proc_unregister();
+#endif
+ adeos_virtualize_irq(rthal_sysreq_virq,NULL,NULL,0);
+
+out_free_irq:
+ adeos_free_irq(rthal_sysreq_virq);
+
+ return err;
+}
+
+void __rthal_exit (void)
+
+{
+#ifdef CONFIG_SMP
+ /* The nucleus also sets the same CPU affinity so that both
+ modules keep their execution sequence on SMP boxen. */
+ set_cpus_allowed(current,cpumask_of_cpu(0));
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PROC_FS
+ rthal_proc_unregister();
+#endif
+
+ if (rthal_sysreq_virq)
+ {
+ adeos_virtualize_irq(rthal_sysreq_virq,NULL,NULL,0);
+ adeos_free_irq(rthal_sysreq_virq);
+ }
+
+ if (rthal_init_done)
+ adeos_unregister_domain(&rthal_domain);
+
+ printk(KERN_INFO "RTAI: hal/ppc64 unloaded.\n");
+}
+
+/*@}*/
+
+module_init(__rthal_init);
+module_exit(__rthal_exit);
+
+EXPORT_SYMBOL(rthal_request_irq);
+EXPORT_SYMBOL(rthal_release_irq);
+EXPORT_SYMBOL(rthal_enable_irq);
+EXPORT_SYMBOL(rthal_disable_irq);
+EXPORT_SYMBOL(rthal_request_linux_irq);
+EXPORT_SYMBOL(rthal_release_linux_irq);
+EXPORT_SYMBOL(rthal_pend_linux_irq);
+EXPORT_SYMBOL(rthal_request_srq);
+EXPORT_SYMBOL(rthal_release_srq);
+EXPORT_SYMBOL(rthal_pend_linux_srq);
+EXPORT_SYMBOL(rthal_set_irq_affinity);
+EXPORT_SYMBOL(rthal_request_timer);
+EXPORT_SYMBOL(rthal_release_timer);
+EXPORT_SYMBOL(rthal_set_trap_handler);
+EXPORT_SYMBOL(rthal_calibrate_timer);
+
+EXPORT_SYMBOL(rthal_critical_enter);
+EXPORT_SYMBOL(rthal_critical_exit);
+EXPORT_SYMBOL(rthal_switch_context);
+
+EXPORT_SYMBOL(rthal_domain);
+EXPORT_SYMBOL(rthal_tunables);
+EXPORT_SYMBOL(rthal_cpu_realtime);
+#ifdef CONFIG_PROC_FS
+EXPORT_SYMBOL(rthal_proc_root);
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_RTAI_HW_FPU
+EXPORT_SYMBOL(rthal_init_fpu);
+EXPORT_SYMBOL(rthal_save_fpu);
+EXPORT_SYMBOL(rthal_restore_fpu);
+#endif /* CONFIG_RTAI_HW_FPU */
diff -Nru fusion-0.7.1/arch/ppc64/hal/switch.S
fusion-0.7.1-ppc64-devel/arch/ppc64/hal/switch.S
--- fusion-0.7.1/arch/ppc64/hal/switch.S 1970-01-01 02:00:00.000000000
+0200
+++ fusion-0.7.1-ppc64-devel/arch/ppc64/hal/switch.S 2005-06-01
16:20:52.000000000 +0300
@@ -0,0 +1,128 @@
+/*
+ * arch/ppc64/hal/switch.S
+ *
+ * Fusion 64-bit PowerPC adoption
+ * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ * based on previous work:
+ *
+ * Copyright (C) 2004 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/processor.h>
+#include <asm/cputable.h>
+#include <asm/page.h>
+#include <asm/offsets.h>
+#include <asm/ppc_asm.h>
+#include <rtai_config.h>
+
+/* std/ld move 64-bit doublewords, so save slots must be 8 bytes apart. */
+#define RTHAL_SAVEREG(reg, pos) std reg,STACK_FRAME_OVERHEAD+8*(pos)(r1)
+#define RTHAL_LOADREG(reg, pos) ld reg,STACK_FRAME_OVERHEAD+8*(pos)(r1)
+
+/*
+ * r3=out_kspp, r4=in_kspp
+ */
+_GLOBAL(rthal_switch_context)
+	/* orig. 124 - WHY? */
+	/* 108+16 = 124 (16 = STACK_FRAME_OVERHEAD on ppc32) */
+	stdu	r1,-224-STACK_FRAME_OVERHEAD(r1)
+
+ /* Save general purpose registers. */
+
+ RTHAL_SAVEREG(r31,0)
+ RTHAL_SAVEREG(r30,1)
+ RTHAL_SAVEREG(r29,2)
+ RTHAL_SAVEREG(r28,3)
+ RTHAL_SAVEREG(r27,4)
+ RTHAL_SAVEREG(r26,5)
+ RTHAL_SAVEREG(r25,6)
+ RTHAL_SAVEREG(r24,7)
+ RTHAL_SAVEREG(r23,8)
+ RTHAL_SAVEREG(r22,9)
+ RTHAL_SAVEREG(r21,10)
+ RTHAL_SAVEREG(r20,11)
+ RTHAL_SAVEREG(r19,12)
+ RTHAL_SAVEREG(r18,13)
+ RTHAL_SAVEREG(r17,14)
+ RTHAL_SAVEREG(r16,15)
+ RTHAL_SAVEREG(r15,16)
+ RTHAL_SAVEREG(r14,17)
+ RTHAL_SAVEREG(r13,18)
+ RTHAL_SAVEREG(r3,19)
+ RTHAL_SAVEREG(r2,20)
+ RTHAL_SAVEREG(r0,21)
+
+ /* Save special registers. */
+
+ mfctr r2
+ RTHAL_SAVEREG(r2,22)
+ mfcr r2
+ RTHAL_SAVEREG(r2,23)
+ mfxer r2
+ RTHAL_SAVEREG(r2,24)
+ mflr r2
+ RTHAL_SAVEREG(r2,25)
+ mfmsr r2
+ RTHAL_SAVEREG(r2,26)
+
+ /* Switch stacks. */
+
+ std r1,0(r3) /* *out_kspp = sp */
+ /* TODO: VSIDs */
+ ld r1,0(r4) /* sp = *in_kspp */
+
+ /* Restore special registers. */
+
+ RTHAL_LOADREG(r2,26)
+ mtmsrd r2
+ RTHAL_LOADREG(r2,25)
+ mtlr r2
+ RTHAL_LOADREG(r2,24)
+ mtxer r2
+ RTHAL_LOADREG(r2,23)
+ mtcr r2
+ RTHAL_LOADREG(r2,22)
+ mtctr r2
+
+ /* Restore general purpose registers. */
+
+ RTHAL_LOADREG(r0,21)
+ RTHAL_LOADREG(r2,20)
+ RTHAL_LOADREG(r3,19)
+ RTHAL_LOADREG(r13,18)
+ RTHAL_LOADREG(r14,17)
+ RTHAL_LOADREG(r15,16)
+ RTHAL_LOADREG(r16,15)
+ RTHAL_LOADREG(r17,14)
+ RTHAL_LOADREG(r18,13)
+ RTHAL_LOADREG(r19,12)
+ RTHAL_LOADREG(r20,11)
+ RTHAL_LOADREG(r21,10)
+ RTHAL_LOADREG(r22,9)
+ RTHAL_LOADREG(r23,8)
+ RTHAL_LOADREG(r24,7)
+ RTHAL_LOADREG(r25,6)
+ RTHAL_LOADREG(r26,5)
+ RTHAL_LOADREG(r27,4)
+ RTHAL_LOADREG(r28,3)
+ RTHAL_LOADREG(r29,2)
+ RTHAL_LOADREG(r30,1)
+ RTHAL_LOADREG(r31,0)
+
+ addi r1,r1,224+STACK_FRAME_OVERHEAD
+
+ blr
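
How this routine is meant to be called, as a C-level sketch (the
structure and function names are made up; the real caller is the
nucleus):

struct thread_ctx {
    unsigned long ksp; /* saved stack pointer (r1) */
};

static inline void switch_to_thread(struct thread_ctx *out,
                                    struct thread_ctx *in)
{
    /* Saves out's non-volatile registers on its own stack, stores r1
       to &out->ksp, then loads r1 from &in->ksp and unwinds in's
       saved state, returning on in's stack. */
    rthal_switch_context(&out->ksp, &in->ksp);
}
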
diff -Nru fusion-0.7.1/config/autoconf/arch2host.sh
fusion-0.7.1-ppc64-devel/config/autoconf/arch2host.sh
--- fusion-0.7.1/config/autoconf/arch2host.sh 2004-08-28 13:45:30.000000000
+0300
+++ fusion-0.7.1-ppc64-devel/config/autoconf/arch2host.sh 2005-05-16
14:58:38.000000000 +0300
@@ -10,6 +10,9 @@
ppc|powerpc)
echo powerpc-unknown-linux-gnu
;;
+ppc64|powerpc64)
+ echo powerpc64-unknown-linux-gnu
+ ;;
"")
# Shorthand not specified: return default value.
echo $2
diff -Nru fusion-0.7.1/configure fusion-0.7.1-ppc64-devel/configure
--- fusion-0.7.1/configure 2005-04-02 17:26:16.000000000 +0300
+++ fusion-0.7.1-ppc64-devel/configure 2005-05-16 15:12:42.000000000 +0300
@@ -467,7 +467,7 @@
#endif"
ac_subdirs_all="$ac_subdirs_all sim"
-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME
PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix
program_transform_name bindir sbindir libexecdir datadir sysconfdir
sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir
build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build
build_cpu build_vendor build_os host host_cpu host_vendor host_os
INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC
EXEEXT OBJEXT CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER
MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK
SET_MAKE am__leading_dot AMTAR am__tar am__untar DEPDIR am__include am__quote
AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE
am__fastdepCC_FALSE MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT EGREP LN_S
ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB CPP CXX CXXFLAGS ac_ct_CXX CXXDEPMODE
am__fastdepCXX_TRUE am__fastdepCXX_FALSE CXXCPP F77 FFLAGS ac_ct_F77 LIBTOOL
CCAS CCASFLAGS LEX LEXLIB LEX_OUTPUT_ROOT CONFIG_RTAI_HW_SMI_DETECT_TRUE
CONFIG_RTAI_HW_SMI_DETECT_FALSE RTAI_MAYBE_SIMDIR RTAI_MAYBE_DOCDIR DOXYGEN
DOXYGEN_HAVE_DOT DBX_DOC_ROOT DBX_DOC_TRUE DBX_DOC_FALSE DBX_GEN_DOC_ROOT
DBX_LINT DBX_XSLTPROC DBX_FOP DBX_MAYBE_NONET DBX_ROOT DBX_XSL_ROOT
DBX_ABS_SRCDIR_TRUE DBX_ABS_SRCDIR_FALSE CONFIG_X86_LOCAL_APIC_TRUE
CONFIG_X86_LOCAL_APIC_FALSE CONFIG_X86_TRUE CONFIG_X86_FALSE CONFIG_PPC_TRUE
CONFIG_PPC_FALSE CONFIG_SMP_TRUE CONFIG_SMP_FALSE CONFIG_LTT_TRUE
CONFIG_LTT_FALSE CONFIG_RTAI_HW_FPU_TRUE CONFIG_RTAI_HW_FPU_FALSE
CONFIG_RTAI_OPT_FUSION_TRUE CONFIG_RTAI_OPT_FUSION_FALSE
CONFIG_RTAI_OPT_PIPE_TRUE CONFIG_RTAI_OPT_PIPE_FALSE
CONFIG_RTAI_SKIN_POSIX_TRUE CONFIG_RTAI_SKIN_POSIX_FALSE
CONFIG_RTAI_SKIN_NATIVE_TRUE CONFIG_RTAI_SKIN_NATIVE_FALSE
CONFIG_RTAI_SKIN_PSOS_TRUE CONFIG_RTAI_SKIN_PSOS_FALSE
CONFIG_RTAI_SKIN_VXWORKS_TRUE CONFIG_RTAI_SKIN_VXWORKS_FALSE
CONFIG_RTAI_SKIN_VRTX_TRUE CONFIG_RTAI_SKIN_VRTX_FALSE
CONFIG_RTAI_SKIN_UITRON_TRUE CONFIG_RTAI_SKIN_UITRON_FALSE
CONFIG_RTAI_OPT_UVM_TRUE CONFIG_RTAI_OPT_UVM_FALSE
CONFIG_RTAI_DRIVERS_16550A_TRUE CONFIG_RTAI_DRIVERS_16550A_FALSE
CONFIG_RTAI_OPT_NATIVE_REGISTRY_TRUE CONFIG_RTAI_OPT_NATIVE_REGISTRY_FALSE
CONFIG_RTAI_OPT_NATIVE_PIPE_TRUE CONFIG_RTAI_OPT_NATIVE_PIPE_FALSE
CONFIG_RTAI_OPT_NATIVE_SEM_TRUE CONFIG_RTAI_OPT_NATIVE_SEM_FALSE
CONFIG_RTAI_OPT_NATIVE_EVENT_TRUE CONFIG_RTAI_OPT_NATIVE_EVENT_FALSE
CONFIG_RTAI_OPT_NATIVE_MUTEX_TRUE CONFIG_RTAI_OPT_NATIVE_MUTEX_FALSE
CONFIG_RTAI_OPT_NATIVE_COND_TRUE CONFIG_RTAI_OPT_NATIVE_COND_FALSE
CONFIG_RTAI_OPT_NATIVE_QUEUE_TRUE CONFIG_RTAI_OPT_NATIVE_QUEUE_FALSE
CONFIG_RTAI_OPT_NATIVE_HEAP_TRUE CONFIG_RTAI_OPT_NATIVE_HEAP_FALSE
CONFIG_RTAI_OPT_NATIVE_ALARM_TRUE CONFIG_RTAI_OPT_NATIVE_ALARM_FALSE
CONFIG_RTAI_OPT_NATIVE_INTR_TRUE CONFIG_RTAI_OPT_NATIVE_INTR_FALSE
CONFIG_RTAI_DOC_DOX_TRUE CONFIG_RTAI_DOC_DOX_FALSE CONFIG_RTAI_MAINT_TRUE
CONFIG_RTAI_MAINT_FALSE CONFIG_RTAI_MAINT_GCH_TRUE CONFIG_RTAI_MAINT_GCH_FALSE
CONFIG_RTAI_MAINT_PGM_TRUE CONFIG_RTAI_MAINT_PGM_FALSE
CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE CONFIG_RTAI_OLD_FASHIONED_BUILD_FALSE
CROSS_COMPILE DOXYGEN_SHOW_INCLUDE_FILES LATEX_BATCHMODE LATEX_MODE
RTAI_BUILD_STRING RTAI_HOST_STRING RTAI_TARGET_ARCH RTAI_TARGET_SUBARCH
RTAI_KMOD_CFLAGS RTAI_USER_CFLAGS RTAI_KMOD_APP_CFLAGS RTAI_USER_APP_CFLAGS
RTAI_FP_CFLAGS RTAI_LINUX_DIR RTAI_LINUX_VERSION RTAI_MODULE_DIR
RTAI_MODULE_EXT RTAI_KBUILD_ENV RTAI_KBUILD_TOP RTAI_KBUILD_BOTTOM
RTAI_KBUILD_CMD RTAI_KBUILD_CLEAN RTAI_KBUILD_DISTCLEAN RTAI_PIPE_NRDEV subdirs
LIBOBJS LTLIBOBJS'
+ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME
PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix
program_transform_name bindir sbindir libexecdir datadir sysconfdir
sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir
build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build
build_cpu build_vendor build_os host host_cpu host_vendor host_os
INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC
EXEEXT OBJEXT CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER
MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK
SET_MAKE am__leading_dot AMTAR am__tar am__untar DEPDIR am__include am__quote
AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE am__fastdepCC_TRUE
am__fastdepCC_FALSE MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT EGREP LN_S
ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB CPP CXX CXXFLAGS ac_ct_CXX CXXDEPMODE
am__fastdepCXX_TRUE am__fastdepCXX_FALSE CXXCPP F77 FFLAGS ac_ct_F77 LIBTOOL
CCAS CCASFLAGS LEX LEXLIB LEX_OUTPUT_ROOT CONFIG_RTAI_HW_SMI_DETECT_TRUE
CONFIG_RTAI_HW_SMI_DETECT_FALSE RTAI_MAYBE_SIMDIR RTAI_MAYBE_DOCDIR DOXYGEN
DOXYGEN_HAVE_DOT DBX_DOC_ROOT DBX_DOC_TRUE DBX_DOC_FALSE DBX_GEN_DOC_ROOT
DBX_LINT DBX_XSLTPROC DBX_FOP DBX_MAYBE_NONET DBX_ROOT DBX_XSL_ROOT
DBX_ABS_SRCDIR_TRUE DBX_ABS_SRCDIR_FALSE CONFIG_X86_LOCAL_APIC_TRUE
CONFIG_X86_LOCAL_APIC_FALSE CONFIG_X86_TRUE CONFIG_X86_FALSE CONFIG_PPC_TRUE
CONFIG_PPC_FALSE CONFIG_PPC64_TRUE CONFIG_PPC64_FALSE CONFIG_SMP_TRUE
CONFIG_SMP_FALSE CONFIG_LTT_TRUE CONFIG_LTT_FALSE CONFIG_RTAI_HW_FPU_TRUE
CONFIG_RTAI_HW_FPU_FALSE CONFIG_RTAI_OPT_FUSION_TRUE
CONFIG_RTAI_OPT_FUSION_FALSE CONFIG_RTAI_OPT_PIPE_TRUE
CONFIG_RTAI_OPT_PIPE_FALSE CONFIG_RTAI_SKIN_POSIX_TRUE
CONFIG_RTAI_SKIN_POSIX_FALSE CONFIG_RTAI_SKIN_NATIVE_TRUE
CONFIG_RTAI_SKIN_NATIVE_FALSE CONFIG_RTAI_SKIN_PSOS_TRUE
CONFIG_RTAI_SKIN_PSOS_FALSE CONFIG_RTAI_SKIN_VXWORKS_TRUE
CONFIG_RTAI_SKIN_VXWORKS_FALSE CONFIG_RTAI_SKIN_VRTX_TRUE
CONFIG_RTAI_SKIN_VRTX_FALSE CONFIG_RTAI_SKIN_UITRON_TRUE
CONFIG_RTAI_SKIN_UITRON_FALSE CONFIG_RTAI_OPT_UVM_TRUE
CONFIG_RTAI_OPT_UVM_FALSE CONFIG_RTAI_DRIVERS_16550A_TRUE
CONFIG_RTAI_DRIVERS_16550A_FALSE CONFIG_RTAI_OPT_NATIVE_REGISTRY_TRUE
CONFIG_RTAI_OPT_NATIVE_REGISTRY_FALSE CONFIG_RTAI_OPT_NATIVE_PIPE_TRUE
CONFIG_RTAI_OPT_NATIVE_PIPE_FALSE CONFIG_RTAI_OPT_NATIVE_SEM_TRUE
CONFIG_RTAI_OPT_NATIVE_SEM_FALSE CONFIG_RTAI_OPT_NATIVE_EVENT_TRUE
CONFIG_RTAI_OPT_NATIVE_EVENT_FALSE CONFIG_RTAI_OPT_NATIVE_MUTEX_TRUE
CONFIG_RTAI_OPT_NATIVE_MUTEX_FALSE CONFIG_RTAI_OPT_NATIVE_COND_TRUE
CONFIG_RTAI_OPT_NATIVE_COND_FALSE CONFIG_RTAI_OPT_NATIVE_QUEUE_TRUE
CONFIG_RTAI_OPT_NATIVE_QUEUE_FALSE CONFIG_RTAI_OPT_NATIVE_HEAP_TRUE
CONFIG_RTAI_OPT_NATIVE_HEAP_FALSE CONFIG_RTAI_OPT_NATIVE_ALARM_TRUE
CONFIG_RTAI_OPT_NATIVE_ALARM_FALSE CONFIG_RTAI_OPT_NATIVE_INTR_TRUE
CONFIG_RTAI_OPT_NATIVE_INTR_FALSE CONFIG_RTAI_DOC_DOX_TRUE
CONFIG_RTAI_DOC_DOX_FALSE CONFIG_RTAI_MAINT_TRUE CONFIG_RTAI_MAINT_FALSE
CONFIG_RTAI_MAINT_GCH_TRUE CONFIG_RTAI_MAINT_GCH_FALSE
CONFIG_RTAI_MAINT_PGM_TRUE CONFIG_RTAI_MAINT_PGM_FALSE
CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE CONFIG_RTAI_OLD_FASHIONED_BUILD_FALSE
CROSS_COMPILE DOXYGEN_SHOW_INCLUDE_FILES LATEX_BATCHMODE LATEX_MODE
RTAI_BUILD_STRING RTAI_HOST_STRING RTAI_TARGET_ARCH RTAI_TARGET_SUBARCH
RTAI_KMOD_CFLAGS RTAI_USER_CFLAGS RTAI_KMOD_APP_CFLAGS RTAI_USER_APP_CFLAGS
RTAI_FP_CFLAGS RTAI_LINUX_DIR RTAI_LINUX_VERSION RTAI_MODULE_DIR
RTAI_MODULE_EXT RTAI_KBUILD_ENV RTAI_KBUILD_TOP RTAI_KBUILD_BOTTOM
RTAI_KBUILD_CMD RTAI_KBUILD_CLEAN RTAI_KBUILD_DISTCLEAN RTAI_PIPE_NRDEV subdirs
LIBOBJS LTLIBOBJS'
ac_subst_files=''
# Initialize some variables set by options.
@@ -19565,6 +19565,11 @@
wanted_kernel_arch=CONFIG_PPC
arch_have_sim=y
;;
+ powerpc64-*|ppc64-*)
+ RTAI_TARGET_ARCH=ppc64
+ wanted_kernel_arch=CONFIG_PPC64
+ arch_have_sim=n
+ ;;
*) echo ""
echo "*******************************************"
echo "Still unsupported host: $host -- Sorry."
@@ -20248,7 +20253,7 @@
timer_type="8254 PIT"
fi
;;
- ppc)
+ ppc|ppc64)
timer_type=decrementer
;;
esac
@@ -21325,6 +21330,16 @@
+if test "$CONFIG_PPC64" = y; then
+ CONFIG_PPC64_TRUE=
+ CONFIG_PPC64_FALSE='#'
+else
+ CONFIG_PPC64_TRUE='#'
+ CONFIG_PPC64_FALSE=
+fi
+
+
+
if test "$CONFIG_SMP" = y; then
CONFIG_SMP_TRUE=
CONFIG_SMP_FALSE='#'
@@ -21723,6 +21738,11 @@
RTAI_USER_CFLAGS="$RTAI_USER_CFLAGS -Wall -Wstrict-prototypes -pipe"
RTAI_FP_CFLAGS="-ffast-math -mhard-float"
;;
+ ppc64)
+ RTAI_TARGET_SUBARCH=
+ RTAI_USER_CFLAGS="$RTAI_USER_CFLAGS -Wall -Wstrict-prototypes -pipe"
+ RTAI_FP_CFLAGS="-ffast-math -mhard-float"
+ ;;
esac
if test x$CONFIG_RTAI_OPT_KSYMS = xy; then
@@ -21806,7 +21826,7 @@
-
ac_config_files="$ac_config_files GNUmakefile config/GNUmakefile
arch/GNUmakefile arch/i386/GNUmakefile arch/i386/hal/GNUmakefile
arch/ppc/GNUmakefile arch/ppc/hal/GNUmakefile include/GNUmakefile
include/nucleus/GNUmakefile include/nucleus/asm-i386/GNUmakefile
include/nucleus/asm-ppc/GNUmakefile include/nucleus/asm-uvm/GNUmakefile
nucleus/GNUmakefile nucleus/lib/GNUmakefile scripts/GNUmakefile
scripts/rtai-config scripts/rtai-load testsuite/GNUmakefile
testsuite/latency/GNUmakefile testsuite/klatency/GNUmakefile
testsuite/cruncher/GNUmakefile testsuite/switch/GNUmakefile skins/GNUmakefile
skins/posix/GNUmakefile skins/rtai/GNUmakefile skins/rtai/lib/GNUmakefile
skins/psos+/GNUmakefile skins/uitron/GNUmakefile skins/vrtx/GNUmakefile
skins/vxworks/GNUmakefile drivers/GNUmakefile drivers/16550A/GNUmakefile
drivers/16550A/lib/GNUmakefile"
+
ac_config_files="$ac_config_files GNUmakefile config/GNUmakefile
arch/GNUmakefile arch/i386/GNUmakefile arch/i386/hal/GNUmakefile
arch/ppc/GNUmakefile arch/ppc/hal/GNUmakefile arch/ppc64/GNUmakefile
arch/ppc64/hal/GNUmakefile include/GNUmakefile include/nucleus/GNUmakefile
include/nucleus/asm-i386/GNUmakefile include/nucleus/asm-ppc/GNUmakefile
include/nucleus/asm-ppc64/GNUmakefile include/nucleus/asm-uvm/GNUmakefile
nucleus/GNUmakefile nucleus/lib/GNUmakefile scripts/GNUmakefile
scripts/rtai-config scripts/rtai-load testsuite/GNUmakefile
testsuite/latency/GNUmakefile testsuite/klatency/GNUmakefile
testsuite/cruncher/GNUmakefile testsuite/switch/GNUmakefile skins/GNUmakefile
skins/posix/GNUmakefile skins/rtai/GNUmakefile skins/rtai/lib/GNUmakefile
skins/psos+/GNUmakefile skins/uitron/GNUmakefile skins/vrtx/GNUmakefile
skins/vxworks/GNUmakefile drivers/GNUmakefile drivers/16550A/GNUmakefile
drivers/16550A/lib/GNUmakefile"
if test \! x$RTAI_MAYBE_DOCDIR = x; then
@@ -21982,6 +22002,13 @@
Usually this means the macro was only invoked conditionally." >&2;}
{ (exit 1); exit 1; }; }
fi
+if test -z "${CONFIG_PPC64_TRUE}" && test -z "${CONFIG_PPC64_FALSE}"; then
+ { { echo "$as_me:$LINENO: error: conditional \"CONFIG_PPC64\" was never
defined.
+Usually this means the macro was only invoked conditionally." >&5
+echo "$as_me: error: conditional \"CONFIG_PPC64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
if test -z "${CONFIG_SMP_TRUE}" && test -z "${CONFIG_SMP_FALSE}"; then
{ { echo "$as_me:$LINENO: error: conditional \"CONFIG_SMP\" was never
defined.
Usually this means the macro was only invoked conditionally." >&5
@@ -22632,10 +22659,13 @@
"arch/i386/hal/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
arch/i386/hal/GNUmakefile" ;;
"arch/ppc/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES arch/ppc/GNUmakefile" ;;
"arch/ppc/hal/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
arch/ppc/hal/GNUmakefile" ;;
+ "arch/ppc64/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
arch/ppc64/GNUmakefile" ;;
+ "arch/ppc64/hal/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
arch/ppc64/hal/GNUmakefile" ;;
"include/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES include/GNUmakefile" ;;
"include/nucleus/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
include/nucleus/GNUmakefile" ;;
"include/nucleus/asm-i386/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
include/nucleus/asm-i386/GNUmakefile" ;;
"include/nucleus/asm-ppc/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
include/nucleus/asm-ppc/GNUmakefile" ;;
+ "include/nucleus/asm-ppc64/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
include/nucleus/asm-ppc64/GNUmakefile" ;;
"include/nucleus/asm-uvm/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
include/nucleus/asm-uvm/GNUmakefile" ;;
"nucleus/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES nucleus/GNUmakefile" ;;
"nucleus/lib/GNUmakefile" ) CONFIG_FILES="$CONFIG_FILES
nucleus/lib/GNUmakefile" ;;
@@ -22867,6 +22897,8 @@
s,@CONFIG_X86_FALSE@,$CONFIG_X86_FALSE,;t t
s,@CONFIG_PPC_TRUE@,$CONFIG_PPC_TRUE,;t t
s,@CONFIG_PPC_FALSE@,$CONFIG_PPC_FALSE,;t t
+s,@CONFIG_PPC64_TRUE@,$CONFIG_PPC64_TRUE,;t t
+s,@CONFIG_PPC64_FALSE@,$CONFIG_PPC64_FALSE,;t t
s,@CONFIG_SMP_TRUE@,$CONFIG_SMP_TRUE,;t t
s,@CONFIG_SMP_FALSE@,$CONFIG_SMP_FALSE,;t t
s,@CONFIG_LTT_TRUE@,$CONFIG_LTT_TRUE,;t t
diff -Nru fusion-0.7.1/configure.in fusion-0.7.1-ppc64-devel/configure.in
--- fusion-0.7.1/configure.in 2005-03-30 16:49:53.000000000 +0300
+++ fusion-0.7.1-ppc64-devel/configure.in 2005-05-16 15:18:40.000000000
+0300
@@ -53,6 +53,11 @@
wanted_kernel_arch=CONFIG_PPC
arch_have_sim=y
;;
+ powerpc64-*|ppc64-*)
+ RTAI_TARGET_ARCH=ppc64
+ wanted_kernel_arch=CONFIG_PPC64
+ arch_have_sim=n
+ ;;
*) echo ""
echo "*******************************************"
echo "Still unsupported host: $host -- Sorry."
@@ -592,7 +597,7 @@
timer_type="8254 PIT"
fi
;;
- ppc)
+ ppc|ppc64)
timer_type=decrementer
;;
esac
@@ -1035,6 +1040,7 @@
dnl CPU architecture
AM_CONDITIONAL(CONFIG_X86,[test "$CONFIG_X86" = y])
AM_CONDITIONAL(CONFIG_PPC,[test "$CONFIG_PPC" = y])
+AM_CONDITIONAL(CONFIG_PPC64,[test "$CONFIG_PPC64" = y])
AM_CONDITIONAL(CONFIG_SMP,[test "$CONFIG_SMP" = y])
AM_CONDITIONAL(CONFIG_LTT,[test "$CONFIG_LTT" = y])
@@ -1132,6 +1138,11 @@
RTAI_USER_CFLAGS="$RTAI_USER_CFLAGS -Wall -Wstrict-prototypes -pipe"
RTAI_FP_CFLAGS="-ffast-math -mhard-float"
;;
+ ppc64)
+ RTAI_TARGET_SUBARCH=
+ RTAI_USER_CFLAGS="$RTAI_USER_CFLAGS -Wall -Wstrict-prototypes -pipe"
+ RTAI_FP_CFLAGS="-ffast-math -mhard-float"
+ ;;
esac
if test x$CONFIG_RTAI_OPT_KSYMS = xy; then
@@ -1212,10 +1223,13 @@
arch/i386/hal/GNUmakefile \
arch/ppc/GNUmakefile \
arch/ppc/hal/GNUmakefile \
+ arch/ppc64/GNUmakefile \
+ arch/ppc64/hal/GNUmakefile \
include/GNUmakefile \
include/nucleus/GNUmakefile \
include/nucleus/asm-i386/GNUmakefile \
include/nucleus/asm-ppc/GNUmakefile \
+ include/nucleus/asm-ppc64/GNUmakefile \
include/nucleus/asm-uvm/GNUmakefile \
nucleus/GNUmakefile \
nucleus/lib/GNUmakefile \
diff -Nru fusion-0.7.1/include/nucleus/GNUmakefile.am
fusion-0.7.1-ppc64-devel/include/nucleus/GNUmakefile.am
--- fusion-0.7.1/include/nucleus/GNUmakefile.am 2005-03-05 18:13:05.000000000
+0200
+++ fusion-0.7.1-ppc64-devel/include/nucleus/GNUmakefile.am 2005-05-16
15:20:20.000000000 +0300
@@ -8,6 +8,10 @@
ARCHDIR = asm-ppc
endif
+if CONFIG_PPC64
+ARCHDIR = asm-ppc64
+endif
+
include_HEADERS = \
fusion.h \
heap.h \
diff -Nru fusion-0.7.1/include/nucleus/GNUmakefile.in
fusion-0.7.1-ppc64-devel/include/nucleus/GNUmakefile.in
--- fusion-0.7.1/include/nucleus/GNUmakefile.in 2005-04-02 17:26:24.000000000
+0300
+++ fusion-0.7.1-ppc64-devel/include/nucleus/GNUmakefile.in 2005-05-16
15:21:55.000000000 +0300
@@ -67,7 +67,7 @@
HEADERS = $(include_HEADERS)
ETAGS = etags
CTAGS = ctags
-DIST_SUBDIRS = asm-ppc asm-i386 asm-uvm
+DIST_SUBDIRS = asm-ppc asm-ppc64 asm-i386 asm-uvm
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMDEP_FALSE = @AMDEP_FALSE@
@@ -87,6 +87,8 @@
CONFIG_LTT_TRUE = @CONFIG_LTT_TRUE@
CONFIG_PPC_FALSE = @CONFIG_PPC_FALSE@
CONFIG_PPC_TRUE = @CONFIG_PPC_TRUE@
+CONFIG_PPC64_FALSE = @CONFIG_PPC64_FALSE@
+CONFIG_PPC64_TRUE = @CONFIG_PPC64_TRUE@
CONFIG_RTAI_DOC_DOX_FALSE = @CONFIG_RTAI_DOC_DOX_FALSE@
CONFIG_RTAI_DOC_DOX_TRUE = @CONFIG_RTAI_DOC_DOX_TRUE@
CONFIG_RTAI_DRIVERS_16550A_FALSE = @CONFIG_RTAI_DRIVERS_16550A_FALSE@
@@ -279,6 +281,7 @@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
@CONFIG_PPC_TRUE@ARCHDIR = asm-ppc
+@CONFIG_PPC64_TRUE@ARCHDIR = asm-ppc64
@CONFIG_X86_TRUE@ARCHDIR = asm-i386
include_HEADERS = \
fusion.h \
diff -Nru fusion-0.7.1/include/nucleus/asm-ppc64/GNUmakefile.am
fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/GNUmakefile.am
--- fusion-0.7.1/include/nucleus/asm-ppc64/GNUmakefile.am 1970-01-01
02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/GNUmakefile.am
2005-05-16 15:38:44.000000000 +0300
@@ -0,0 +1,14 @@
+includedir = $(prefix)/include/nucleus/asm-ppc64
+
+include_HEADERS = \
+ atomic.h \
+ calibration.h \
+ hal.h \
+ system.h \
+ syscall.h \
+ uart.h
+
+install-data-local:
+ $(mkinstalldirs) $(DESTDIR)$(includedir)
+ rm -f $(DESTDIR)$(includedir)/../asm
+ $(LN_S) asm-ppc64 $(DESTDIR)$(includedir)/../asm
diff -Nru fusion-0.7.1/include/nucleus/asm-ppc64/GNUmakefile.in
fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/GNUmakefile.in
--- fusion-0.7.1/include/nucleus/asm-ppc64/GNUmakefile.in 1970-01-01
02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/GNUmakefile.in
2005-05-16 15:39:41.000000000 +0300
@@ -0,0 +1,513 @@
+# GNUmakefile.in generated by automake 1.9.2 from GNUmakefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+top_builddir = ../../..
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+INSTALL = @INSTALL@
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = include/nucleus/asm-ppc64
+DIST_COMMON = $(include_HEADERS) $(srcdir)/GNUmakefile.am \
+ $(srcdir)/GNUmakefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/config/autoconf/docbook.m4 \
+ $(top_srcdir)/configure.in
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(SHELL) $(top_srcdir)/config/autoconf/mkinstalldirs
+CONFIG_HEADER = $(top_builddir)/rtai_config.h
+CONFIG_CLEAN_FILES =
+SOURCES =
+DIST_SOURCES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
+am__installdirs = "$(DESTDIR)$(includedir)"
+includeHEADERS_INSTALL = $(INSTALL_HEADER)
+HEADERS = $(include_HEADERS)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMDEP_FALSE = @AMDEP_FALSE@
+AMDEP_TRUE = @AMDEP_TRUE@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CONFIG_LTT_FALSE = @CONFIG_LTT_FALSE@
+CONFIG_LTT_TRUE = @CONFIG_LTT_TRUE@
+CONFIG_PPC64_FALSE = @CONFIG_PPC64_FALSE@
+CONFIG_PPC64_TRUE = @CONFIG_PPC64_TRUE@
+CONFIG_RTAI_DOC_DOX_FALSE = @CONFIG_RTAI_DOC_DOX_FALSE@
+CONFIG_RTAI_DOC_DOX_TRUE = @CONFIG_RTAI_DOC_DOX_TRUE@
+CONFIG_RTAI_DRIVERS_16550A_FALSE = @CONFIG_RTAI_DRIVERS_16550A_FALSE@
+CONFIG_RTAI_DRIVERS_16550A_TRUE = @CONFIG_RTAI_DRIVERS_16550A_TRUE@
+CONFIG_RTAI_HW_FPU_FALSE = @CONFIG_RTAI_HW_FPU_FALSE@
+CONFIG_RTAI_HW_FPU_TRUE = @CONFIG_RTAI_HW_FPU_TRUE@
+CONFIG_RTAI_HW_SMI_DETECT_FALSE = @CONFIG_RTAI_HW_SMI_DETECT_FALSE@
+CONFIG_RTAI_HW_SMI_DETECT_TRUE = @CONFIG_RTAI_HW_SMI_DETECT_TRUE@
+CONFIG_RTAI_MAINT_FALSE = @CONFIG_RTAI_MAINT_FALSE@
+CONFIG_RTAI_MAINT_GCH_FALSE = @CONFIG_RTAI_MAINT_GCH_FALSE@
+CONFIG_RTAI_MAINT_GCH_TRUE = @CONFIG_RTAI_MAINT_GCH_TRUE@
+CONFIG_RTAI_MAINT_PGM_FALSE = @CONFIG_RTAI_MAINT_PGM_FALSE@
+CONFIG_RTAI_MAINT_PGM_TRUE = @CONFIG_RTAI_MAINT_PGM_TRUE@
+CONFIG_RTAI_MAINT_TRUE = @CONFIG_RTAI_MAINT_TRUE@
+CONFIG_RTAI_OLD_FASHIONED_BUILD_FALSE = @CONFIG_RTAI_OLD_FASHIONED_BUILD_FALSE@
+CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE = @CONFIG_RTAI_OLD_FASHIONED_BUILD_TRUE@
+CONFIG_RTAI_OPT_FUSION_FALSE = @CONFIG_RTAI_OPT_FUSION_FALSE@
+CONFIG_RTAI_OPT_FUSION_TRUE = @CONFIG_RTAI_OPT_FUSION_TRUE@
+CONFIG_RTAI_OPT_NATIVE_ALARM_FALSE = @CONFIG_RTAI_OPT_NATIVE_ALARM_FALSE@
+CONFIG_RTAI_OPT_NATIVE_ALARM_TRUE = @CONFIG_RTAI_OPT_NATIVE_ALARM_TRUE@
+CONFIG_RTAI_OPT_NATIVE_COND_FALSE = @CONFIG_RTAI_OPT_NATIVE_COND_FALSE@
+CONFIG_RTAI_OPT_NATIVE_COND_TRUE = @CONFIG_RTAI_OPT_NATIVE_COND_TRUE@
+CONFIG_RTAI_OPT_NATIVE_EVENT_FALSE = @CONFIG_RTAI_OPT_NATIVE_EVENT_FALSE@
+CONFIG_RTAI_OPT_NATIVE_EVENT_TRUE = @CONFIG_RTAI_OPT_NATIVE_EVENT_TRUE@
+CONFIG_RTAI_OPT_NATIVE_HEAP_FALSE = @CONFIG_RTAI_OPT_NATIVE_HEAP_FALSE@
+CONFIG_RTAI_OPT_NATIVE_HEAP_TRUE = @CONFIG_RTAI_OPT_NATIVE_HEAP_TRUE@
+CONFIG_RTAI_OPT_NATIVE_INTR_FALSE = @CONFIG_RTAI_OPT_NATIVE_INTR_FALSE@
+CONFIG_RTAI_OPT_NATIVE_INTR_TRUE = @CONFIG_RTAI_OPT_NATIVE_INTR_TRUE@
+CONFIG_RTAI_OPT_NATIVE_MUTEX_FALSE = @CONFIG_RTAI_OPT_NATIVE_MUTEX_FALSE@
+CONFIG_RTAI_OPT_NATIVE_MUTEX_TRUE = @CONFIG_RTAI_OPT_NATIVE_MUTEX_TRUE@
+CONFIG_RTAI_OPT_NATIVE_PIPE_FALSE = @CONFIG_RTAI_OPT_NATIVE_PIPE_FALSE@
+CONFIG_RTAI_OPT_NATIVE_PIPE_TRUE = @CONFIG_RTAI_OPT_NATIVE_PIPE_TRUE@
+CONFIG_RTAI_OPT_NATIVE_QUEUE_FALSE = @CONFIG_RTAI_OPT_NATIVE_QUEUE_FALSE@
+CONFIG_RTAI_OPT_NATIVE_QUEUE_TRUE = @CONFIG_RTAI_OPT_NATIVE_QUEUE_TRUE@
+CONFIG_RTAI_OPT_NATIVE_REGISTRY_FALSE = @CONFIG_RTAI_OPT_NATIVE_REGISTRY_FALSE@
+CONFIG_RTAI_OPT_NATIVE_REGISTRY_TRUE = @CONFIG_RTAI_OPT_NATIVE_REGISTRY_TRUE@
+CONFIG_RTAI_OPT_NATIVE_SEM_FALSE = @CONFIG_RTAI_OPT_NATIVE_SEM_FALSE@
+CONFIG_RTAI_OPT_NATIVE_SEM_TRUE = @CONFIG_RTAI_OPT_NATIVE_SEM_TRUE@
+CONFIG_RTAI_OPT_PIPE_FALSE = @CONFIG_RTAI_OPT_PIPE_FALSE@
+CONFIG_RTAI_OPT_PIPE_TRUE = @CONFIG_RTAI_OPT_PIPE_TRUE@
+CONFIG_RTAI_OPT_UVM_FALSE = @CONFIG_RTAI_OPT_UVM_FALSE@
+CONFIG_RTAI_OPT_UVM_TRUE = @CONFIG_RTAI_OPT_UVM_TRUE@
+CONFIG_RTAI_SKIN_NATIVE_FALSE = @CONFIG_RTAI_SKIN_NATIVE_FALSE@
+CONFIG_RTAI_SKIN_NATIVE_TRUE = @CONFIG_RTAI_SKIN_NATIVE_TRUE@
+CONFIG_RTAI_SKIN_POSIX_FALSE = @CONFIG_RTAI_SKIN_POSIX_FALSE@
+CONFIG_RTAI_SKIN_POSIX_TRUE = @CONFIG_RTAI_SKIN_POSIX_TRUE@
+CONFIG_RTAI_SKIN_PSOS_FALSE = @CONFIG_RTAI_SKIN_PSOS_FALSE@
+CONFIG_RTAI_SKIN_PSOS_TRUE = @CONFIG_RTAI_SKIN_PSOS_TRUE@
+CONFIG_RTAI_SKIN_UITRON_FALSE = @CONFIG_RTAI_SKIN_UITRON_FALSE@
+CONFIG_RTAI_SKIN_UITRON_TRUE = @CONFIG_RTAI_SKIN_UITRON_TRUE@
+CONFIG_RTAI_SKIN_VRTX_FALSE = @CONFIG_RTAI_SKIN_VRTX_FALSE@
+CONFIG_RTAI_SKIN_VRTX_TRUE = @CONFIG_RTAI_SKIN_VRTX_TRUE@
+CONFIG_RTAI_SKIN_VXWORKS_FALSE = @CONFIG_RTAI_SKIN_VXWORKS_FALSE@
+CONFIG_RTAI_SKIN_VXWORKS_TRUE = @CONFIG_RTAI_SKIN_VXWORKS_TRUE@
+CONFIG_SMP_FALSE = @CONFIG_SMP_FALSE@
+CONFIG_SMP_TRUE = @CONFIG_SMP_TRUE@
+CONFIG_X86_FALSE = @CONFIG_X86_FALSE@
+CONFIG_X86_LOCAL_APIC_FALSE = @CONFIG_X86_LOCAL_APIC_FALSE@
+CONFIG_X86_LOCAL_APIC_TRUE = @CONFIG_X86_LOCAL_APIC_TRUE@
+CONFIG_X86_TRUE = @CONFIG_X86_TRUE@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CROSS_COMPILE = @CROSS_COMPILE@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DBX_ABS_SRCDIR_FALSE = @DBX_ABS_SRCDIR_FALSE@
+DBX_ABS_SRCDIR_TRUE = @DBX_ABS_SRCDIR_TRUE@
+DBX_DOC_FALSE = @DBX_DOC_FALSE@
+DBX_DOC_ROOT = @DBX_DOC_ROOT@
+DBX_DOC_TRUE = @DBX_DOC_TRUE@
+DBX_FOP = @DBX_FOP@
+DBX_GEN_DOC_ROOT = @DBX_GEN_DOC_ROOT@
+DBX_LINT = @DBX_LINT@
+DBX_MAYBE_NONET = @DBX_MAYBE_NONET@
+DBX_ROOT = @DBX_ROOT@
+DBX_XSLTPROC = @DBX_XSLTPROC@
+DBX_XSL_ROOT = @DBX_XSL_ROOT@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DOXYGEN = @DOXYGEN@
+DOXYGEN_HAVE_DOT = @DOXYGEN_HAVE_DOT@
+DOXYGEN_SHOW_INCLUDE_FILES = @DOXYGEN_SHOW_INCLUDE_FILES@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FFLAGS = @FFLAGS@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LATEX_BATCHMODE = @LATEX_BATCHMODE@
+LATEX_MODE = @LATEX_MODE@
+LDFLAGS = @LDFLAGS@
+LEX = @LEX@
+LEXLIB = @LEXLIB@
+LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
+MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
+MAKEINFO = @MAKEINFO@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RTAI_BUILD_STRING = @RTAI_BUILD_STRING@
+RTAI_FP_CFLAGS = @RTAI_FP_CFLAGS@
+RTAI_HOST_STRING = @RTAI_HOST_STRING@
+RTAI_KBUILD_BOTTOM = @RTAI_KBUILD_BOTTOM@
+RTAI_KBUILD_CLEAN = @RTAI_KBUILD_CLEAN@
+RTAI_KBUILD_CMD = @RTAI_KBUILD_CMD@
+RTAI_KBUILD_DISTCLEAN = @RTAI_KBUILD_DISTCLEAN@
+RTAI_KBUILD_ENV = @RTAI_KBUILD_ENV@
+RTAI_KBUILD_TOP = @RTAI_KBUILD_TOP@
+RTAI_KMOD_APP_CFLAGS = @RTAI_KMOD_APP_CFLAGS@
+RTAI_KMOD_CFLAGS = @RTAI_KMOD_CFLAGS@
+RTAI_LINUX_DIR = @RTAI_LINUX_DIR@
+RTAI_LINUX_VERSION = @RTAI_LINUX_VERSION@
+RTAI_MAYBE_DOCDIR = @RTAI_MAYBE_DOCDIR@
+RTAI_MAYBE_SIMDIR = @RTAI_MAYBE_SIMDIR@
+RTAI_MODULE_DIR = @RTAI_MODULE_DIR@
+RTAI_MODULE_EXT = @RTAI_MODULE_EXT@
+RTAI_PIPE_NRDEV = @RTAI_PIPE_NRDEV@
+RTAI_TARGET_ARCH = @RTAI_TARGET_ARCH@
+RTAI_TARGET_SUBARCH = @RTAI_TARGET_SUBARCH@
+RTAI_USER_APP_CFLAGS = @RTAI_USER_APP_CFLAGS@
+RTAI_USER_CFLAGS = @RTAI_USER_CFLAGS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+ac_ct_RANLIB = @ac_ct_RANLIB@
+ac_ct_STRIP = @ac_ct_STRIP@
+am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
+am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
+am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
+am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+datadir = @datadir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+includedir = $(prefix)/include/nucleus/asm-ppc64
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+subdirs = @subdirs@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+include_HEADERS = \
+ atomic.h \
+ calibration.h \
+ hal.h \
+ system.h \
+ syscall.h \
+ uart.h
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/GNUmakefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/GNUmakefile.am
$(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign
include/nucleus/asm-ppc64/GNUmakefile'; \
+ cd $(top_srcdir) && \
+ $(AUTOMAKE) --foreign include/nucleus/asm-ppc64/GNUmakefile
+.PRECIOUS: GNUmakefile
+GNUmakefile: $(srcdir)/GNUmakefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
$(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
$(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure
$(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+distclean-libtool:
+ -rm -f libtool
+uninstall-info-am:
+install-includeHEADERS: $(include_HEADERS)
+ @$(NORMAL_INSTALL)
+ test -z "$(includedir)" || $(mkdir_p) "$(DESTDIR)$(includedir)"
+ @list='$(include_HEADERS)'; for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ f=$(am__strip_dir) \
+ echo " $(includeHEADERS_INSTALL) '$$d$$p'
'$(DESTDIR)$(includedir)/$$f'"; \
+ $(includeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(includedir)/$$f"; \
+ done
+
+uninstall-includeHEADERS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(include_HEADERS)'; for p in $$list; do \
+ f=$(am__strip_dir) \
+ echo " rm -f '$(DESTDIR)$(includedir)/$$f'"; \
+ rm -f "$(DESTDIR)$(includedir)/$$f"; \
+ done
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$tags $$unique; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(CTAGS_ARGS)$$tags$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$tags $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && cd $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
+ list='$(DISTFILES)'; for file in $$list; do \
+ case $$file in \
+ $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
+ $(top_srcdir)/*) file=`echo "$$file" | sed
"s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
+ esac; \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test "$$dir" != "$$file" && test "$$dir" != "."; then \
+ dir="/$$dir"; \
+ $(mkdir_p) "$(distdir)$$dir"; \
+ else \
+ dir=''; \
+ fi; \
+ if test -d $$d/$$file; then \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+ fi; \
+ cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+ else \
+ test -f $(distdir)/$$file \
+ || cp -p $$d/$$file $(distdir)/$$file \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: GNUmakefile $(HEADERS)
+installdirs:
+ for dir in "$(DESTDIR)$(includedir)"; do \
+ test -z "$$dir" || $(mkdir_p) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+ -rm -f GNUmakefile
+distclean-am: clean-am distclean-generic distclean-libtool \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am: install-data-local install-includeHEADERS
+
+install-exec-am:
+
+install-info: install-info-am
+
+install-man:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f GNUmakefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-includeHEADERS uninstall-info-am
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+ clean-libtool ctags distclean distclean-generic \
+ distclean-libtool distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-data-local install-exec \
+ install-exec-am install-includeHEADERS install-info \
+ install-info-am install-man install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \
+ uninstall-am uninstall-includeHEADERS uninstall-info-am
+
+
+install-data-local:
+ $(mkinstalldirs) $(DESTDIR)$(includedir)
+ rm -f $(DESTDIR)$(includedir)/../asm
+ $(LN_S) asm-ppc64 $(DESTDIR)$(includedir)/../asm
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff -Nru fusion-0.7.1/include/nucleus/asm-ppc64/atomic.h
fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/atomic.h
--- fusion-0.7.1/include/nucleus/asm-ppc64/atomic.h 1970-01-01
02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/atomic.h 2005-06-01
16:31:43.000000000 +0300
@@ -0,0 +1,221 @@
+/*
+ * RTAI/fusion 64-bit PowerPC adoption
+ * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ * based on previous work:
+ *
+ * Copyright (C) 2003,2004 Philippe Gerum <[EMAIL PROTECTED]>.
+ *
+ * RTAI/fusion is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * RTAI/fusion is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with RTAI/fusion; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _RTAI_ASM_PPC64_ATOMIC_H
+#define _RTAI_ASM_PPC64_ATOMIC_H
+
+#include <asm/atomic.h>
+
+/* on i386 these are provided by the kernel's asm/atomic.h */
+static __inline__ void atomic_set_mask(unsigned int mask,
+ unsigned int *ptr)
+{
+ __asm__ __volatile__ ("\n\
+1: lwarx 5,0,%0 \n\
+ or 5,5,%1\n"
+" stwcx. 5,0,%0 \n\
+ bne- 1b"
+ : /*no output*/
+ : "r" (ptr), "r" (mask)
+ : "r5", "cc", "memory");
+}
+
+static __inline__ void atomic_clear_mask(unsigned int mask,
+ unsigned int *ptr)
+{
+ __asm__ __volatile__ ("\n\
+1: lwarx 5,0,%0 \n\
+ andc 5,5,%1\n"
+" stwcx. 5,0,%0 \n\
+ bne- 1b"
+ : /*no output*/
+ : "r" (ptr), "r" (mask)
+ : "r5", "cc", "memory");
+}
+
+#ifdef __KERNEL__
+
+#include <linux/bitops.h>
+#include <asm/system.h>
+
+#define atomic_xchg(ptr,v) xchg(ptr,v)
+#define atomic_cmpxchg(ptr,o,n) cmpxchg(ptr,o,n)
+#define xnarch_memory_barrier() smp_mb()
+
+#define xnarch_atomic_set(pcounter,i) atomic_set(pcounter,i)
+#define xnarch_atomic_get(pcounter) atomic_read(pcounter)
+#define xnarch_atomic_inc(pcounter) atomic_inc(pcounter)
+#define xnarch_atomic_dec(pcounter) atomic_dec(pcounter)
+#define xnarch_atomic_inc_and_test(pcounter) atomic_inc_and_test(pcounter)
+#define xnarch_atomic_dec_and_test(pcounter) atomic_dec_and_test(pcounter)
+#define xnarch_atomic_set_mask(pflags,mask) atomic_set_mask(mask,pflags)
+#define xnarch_atomic_clear_mask(pflags,mask) atomic_clear_mask(mask,pflags)
+
+#else /* !__KERNEL__ */
+#ifdef PEKKA_PUUPAA
+#include <linux/config.h>
+
+/*
+ * Shamelessly lifted from <linux/asm-ppc64/system.h>
+ * and <linux/asm-ppc64/atomic.h>
+ */
+
+/* from asm-ppc64/memory.h */
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+#define EIEIO_ON_SMP "eieio\n"
+#define ISYNC_ON_SMP "\n\tisync"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#endif
+
+static __inline__ unsigned long
+atomic_cmpxchg(volatile long *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __inline__ unsigned long
+atomic_xchg(volatile long *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%3 # __xchg_u64\n\
+ stdcx. %2,0,%3\n\
+2: bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (dummy), "=m" (*m)
+ : "r" (val), "r" (m)
+ : "cc", "memory");
+
+ return (dummy);
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_inc\n\
+ addic %0,%0,1\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_inc_return\n\
+ addic %0,%0,1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_dec\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_dec_return\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+
+
+#define xnarch_memory_barrier() __asm__ __volatile__ ("sync" : : : "memory")
+
+#define xnarch_atomic_set(pcounter,i) (((pcounter)->counter) = (i))
+#define xnarch_atomic_get(pcounter) ((pcounter)->counter)
+#define xnarch_atomic_inc(pcounter) atomic_inc(pcounter)
+#define xnarch_atomic_dec(pcounter) atomic_dec(pcounter)
+#define xnarch_atomic_inc_and_test(pcounter) (atomic_inc_return(pcounter) == 0)
+#define xnarch_atomic_dec_and_test(pcounter) (atomic_dec_return(pcounter) == 0)
+#define xnarch_atomic_set_mask(pflags,mask) atomic_set_mask(mask,pflags)
+#define xnarch_atomic_clear_mask(pflags,mask) atomic_clear_mask(mask,pflags)
+#endif
+#endif /* __KERNEL__ */
+
+typedef atomic_t atomic_counter_t;
+typedef unsigned int atomic_flags_t;
+
+#define xnarch_atomic_xchg(ptr,x) atomic_xchg(ptr,x)
+
+#endif /* !_RTAI_ASM_PPC64_ATOMIC_H */
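
As a small usage sketch of the xnarch_atomic_* wrappers above (kernel
side; the refcount scenario is made up):

static atomic_counter_t refcount; /* atomic_t underneath */

static void get_ref(void)
{
    xnarch_atomic_inc(&refcount);
}

static int put_ref(void)
{
    /* Nonzero when the count has just dropped to zero. */
    return xnarch_atomic_dec_and_test(&refcount);
}
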
diff -Nru fusion-0.7.1/include/nucleus/asm-ppc64/calibration.h
fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/calibration.h
--- fusion-0.7.1/include/nucleus/asm-ppc64/calibration.h 1970-01-01
02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/calibration.h
2005-01-02 21:28:48.000000000 +0200
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <[EMAIL PROTECTED]>.
+ *
+ * RTAI/fusion is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * RTAI/fusion is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with RTAI/fusion; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _RTAI_ASM_PPC64_CALIBRATION_H
+#define _RTAI_ASM_PPC64_CALIBRATION_H
+
+#include <rtai_config.h>
+#include <asm/delay.h>
+
+#define __bogomips (loops_per_jiffy/(500000/HZ))
+
+static inline unsigned long xnarch_get_sched_latency (void)
+
+{
+#if CONFIG_RTAI_HW_SCHED_LATENCY != 0
+#define __sched_latency CONFIG_RTAI_HW_SCHED_LATENCY
+#else
+
+#define __sched_latency 18500
+
+#endif /* CONFIG_RTAI_HW_SCHED_LATENCY */
+
+ return __sched_latency;
+}
+
+#undef __sched_latency
+#undef __bogomips
+
+#endif /* !_RTAI_ASM_PPC64_CALIBRATION_H */
diff -Nru fusion-0.7.1/include/nucleus/asm-ppc64/hal.h fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/hal.h
--- fusion-0.7.1/include/nucleus/asm-ppc64/hal.h 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/hal.h 2005-06-01 21:28:14.000000000 +0300
@@ -0,0 +1,406 @@
+/**
+ * @ingroup hal_ppc64
+ * @file
+ *
+ * Adeos-based Real-Time Hardware Abstraction Layer for PPC64.
+ *
+ * RTAI/fusion 64-bit PowerPC adaptation
+ * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ * based on previous work:
+ *
+ * Original RTAI/ppc layer implementation: \n
+ * Copyright © 2000 Paolo Mantegazza, \n
+ * Copyright © 2001 David Schleef, \n
+ * Copyright © 2001 Lineo, Inc, \n
+ * Copyright © 2004 Wolfgang Grandegger, \n
+ * and others.
+ *
+ * Copyright © 2002-2004 Philippe Gerum.
+ *
+ * RTAI/fusion is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, Inc., 675 Mass Ave,
+ * Cambridge MA 02139, USA; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * RTAI/fusion is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with RTAI/fusion; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+/**
+ * @addtogroup hal_ppc64
+ *@{*/
+
+#ifndef _RTAI_ASM_PPC64_HAL_H
+#define _RTAI_ASM_PPC64_HAL_H
+
+
+#include <rtai_config.h>
+#include <asm/div64.h>
+
+#define RTHAL_NR_CPUS ADEOS_NR_CPUS
+
+typedef unsigned long rthal_time_t;
+
+/*
+#define __rthal_u64tou32(ull, h, l) ({ \
+ union { unsigned long long _ull; \
+ struct { u_long _h; u_long _l; } _s; } _u; \
+ _u._ull = (ull); \
+ (h) = _u._s._h; \
+ (l) = _u._s._l; \
+ })
+
+#define __rthal_u64fromu32(h, l) ({ \
+ union { unsigned long long _ull; \
+ struct { u_long _h; u_long _l; } _s; } _u; \
+ _u._s._h = (h); \
+ _u._s._l = (l); \
+ _u._ull; \
+ })
+*/
+
+static inline unsigned long long rthal_ullmul(const unsigned long m0,
+ const unsigned long m1)
+{
+ return (unsigned long long)m0 * m1;
+}
+
+static inline unsigned long long rthal_ulldiv (unsigned long long ull,
+ const unsigned long uld,
+ unsigned long *const rp)
+{
+ const unsigned int r = ull % uld;
+ ull /= uld;
+
+ if (rp)
+ *rp = r;
+
+ return ull;
+}
+
+#define rthal_uldivrem(ull,ul,rp) ((u_long) rthal_ulldiv((ull),(ul),(rp)))
+
+static inline int rthal_imuldiv (int i, int mult, int div) {
+
+ /* Returns (int)i = (unsigned long long)i*(u_long)(mult)/(u_long)div. */
+ const unsigned long long ull = rthal_ullmul(i, mult);
+ return rthal_uldivrem(ull, div, NULL);
+}
+
+static inline __attribute_const__
+unsigned long long __rthal_ullimd (const unsigned long long op,
+ const unsigned long m,
+ const unsigned long d)
+{
+ return (op*m)/d;
+}
+
+/*
+static inline __attribute_const__
+unsigned long long __rthal_ullimd (const unsigned long long op,
+ const unsigned long m,
+ const unsigned long d)
+{
+ u_long oph, opl, tlh, tll, qh, rh, ql;
+ unsigned long long th, tl;
+
+ __rthal_u64tou32(op, oph, opl);
+ tl = rthal_ullmul(opl, m);
+ __rthal_u64tou32(tl, tlh, tll);
+ th = rthal_ullmul(oph, m);
+ th += tlh;
+
+ qh = rthal_uldivrem(th, d, &rh);
+ th = __rthal_u64fromu32(rh, tll);
+ ql = rthal_uldivrem(th, d, NULL);
+ return __rthal_u64fromu32(qh, ql);
+}
+*/
+
+static inline long long rthal_llimd (long long op,
+ unsigned long m,
+ unsigned long d)
+{
+
+ if(op < 0LL)
+ return -__rthal_ullimd(-op, m, d);
+ return __rthal_ullimd(op, m, d);
+}
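
One caveat worth flagging: the live __rthal_ullimd() above computes
(op*m)/d directly, so the intermediate product silently wraps once op*m
exceeds 64 bits, which the commented-out split-word variant avoids. With
m = 10^9 and a hypothetical 33 MHz timebase, that headroom runs out after
roughly nine minutes' worth of ticks, so this probably deserves a second
look. The typical consumer is tick/ns scaling:

    /* Sketch: timebase ticks to nanoseconds. The 33333333 Hz timebase
       frequency is hypothetical; real code uses the calibrated value. */
    static long long ticks_to_ns(long long ticks)
    {
        return rthal_llimd(ticks, 1000000000, 33333333);
    }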
+
+static inline unsigned long ffnz (unsigned long ul) {
+
+ __asm__ __volatile__ ("cntlzd %0, %1" : "=r" (ul) : "r" (ul & (-ul)));
+ return 63 - ul;
+}
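
ffnz() returns the index of the least significant set bit: ul & (-ul)
isolates that bit, cntlzd counts its leading zeros, and 63 minus that
count is the bit index. A few spot checks, worked by hand:

    /* ffnz(0x1)  == 0   bit 0 is the lowest set bit
       ffnz(0x8)  == 3   cntlzd(0x8) == 60, 63 - 60 == 3
       ffnz(0xc0) == 6   0x40 (bit 6) wins over 0x80 (bit 7) */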
+
+#if defined(__KERNEL__) && !defined(__cplusplus)
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/system.h>
+#include <asm/time.h>
+#include <asm/timex.h>
+#include <nucleus/asm/atomic.h>
+#include <asm/processor.h>
+
+typedef void (*rthal_irq_handler_t)(unsigned irq,
+ void *cookie);
+
+struct rthal_calibration_data {
+
+ unsigned long cpu_freq;
+ unsigned long timer_freq;
+};
+
+extern struct rthal_calibration_data rthal_tunables;
+
+extern volatile unsigned long rthal_cpu_realtime;
+
+extern adomain_t rthal_domain;
+
+#define RTHAL_DOMAIN_ID 0x52544149
+
+#define RTHAL_NR_SRQS 32
+
+#define RTHAL_TIMER_IRQ ADEOS_TIMER_VIRQ
+#define RTHAL_TIMER_FREQ (rthal_tunables.timer_freq)
+#define RTHAL_CPU_FREQ (rthal_tunables.cpu_freq)
+
+static inline unsigned long long rthal_rdtsc (void) {
+ unsigned long long t;
+ adeos_hw_tsc(t);
+ return t;
+}
+
+#define rthal_cli()                     adeos_stall_pipeline_from(&rthal_domain)
+#define rthal_sti()                     adeos_unstall_pipeline_from(&rthal_domain)
+#define rthal_local_irq_save(x)         ((x) = !!adeos_test_and_stall_pipeline_from(&rthal_domain))
+#define rthal_local_irq_restore(x)      adeos_restore_pipeline_from(&rthal_domain,(x))
+#define rthal_local_irq_flags(x)        ((x) = !!adeos_test_pipeline_from(&rthal_domain))
+#define rthal_local_irq_test()          (!!adeos_test_pipeline_from(&rthal_domain))
+#define rthal_local_irq_sync(x)         ((x) = !!adeos_test_and_unstall_pipeline_from(&rthal_domain))
+
+#define rthal_hw_lock(flags) adeos_hw_local_irq_save(flags)
+#define rthal_hw_unlock(flags) adeos_hw_local_irq_restore(flags)
+#define rthal_hw_enable() adeos_hw_sti()
+#define rthal_hw_disable() adeos_hw_cli()
+
+#define rthal_linux_sti() adeos_unstall_pipeline_from(adp_root)
+#define rthal_linux_cli() adeos_stall_pipeline_from(adp_root)
+#define rthal_linux_local_irq_save(x)   ((x) = !!adeos_test_and_stall_pipeline_from(adp_root))
+#define rthal_linux_local_irq_restore(x) adeos_restore_pipeline_from(adp_root,x)
+#define rthal_linux_local_irq_restore_nosync(x,cpuid) adeos_restore_pipeline_nosync(adp_root,x,cpuid)
+
+#define rthal_spin_lock(lock) adeos_spin_lock(lock)
+#define rthal_spin_unlock(lock) adeos_spin_unlock(lock)
+
+static inline void rthal_spin_lock_irq(spinlock_t *lock) {
+
+ rthal_cli();
+ rthal_spin_lock(lock);
+}
+
+static inline void rthal_spin_unlock_irq(spinlock_t *lock) {
+
+ rthal_spin_unlock(lock);
+ rthal_sti();
+}
+
+static inline unsigned long rthal_spin_lock_irqsave(spinlock_t *lock) {
+
+ unsigned long flags;
+ rthal_local_irq_save(flags);
+ rthal_spin_lock(lock);
+ return flags;
+}
+
+static inline void rthal_spin_unlock_irqrestore(unsigned long flags,
+ spinlock_t *lock) {
+ rthal_spin_unlock(lock);
+ rthal_local_irq_restore(flags);
+}
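
For reference, the expected pairing for these helpers, sketched with a
hypothetical lock and critical section:

    static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;

    static void touch_shared_state(void)
    {
        unsigned long flags;

        flags = rthal_spin_lock_irqsave(&my_lock);
        /* ... critical section, RTAI stage stalled ... */
        rthal_spin_unlock_irqrestore(flags, &my_lock);
    }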
+
+#if !defined(CONFIG_ADEOS_NOTHREADS)
+
+/* Since real-time interrupt handlers are called on behalf of the RTAI
+   domain stack, we cannot infer the "current" Linux task address from
+   the stack pointer (r1 on this arch). We must use the suspended Linux
+   domain's stack pointer instead. */
+static inline struct task_struct *rthal_get_root_current (int cpuid) {
+ return ((struct thread_info *)(adp_root->esp[cpuid] & (~(16384-1))))->task;
+}
+
+static inline struct task_struct *rthal_get_current (int cpuid)
+
+{
+ register unsigned long esp asm ("r1");
+    /* PPC32: FIXME: r2 should be ok or even __adeos_current_threadinfo() - offsetof(THREAD) */
+    /* if ^, then on PPC64 should use paca */
+    if (esp >= rthal_domain.estackbase[cpuid] && esp < rthal_domain.estackbase[cpuid] + 16384)
+ return rthal_get_root_current(cpuid);
+
+ return current;
+}
+
+#else /* CONFIG_ADEOS_NOTHREADS */
+
+static inline struct task_struct *rthal_get_root_current (int cpuid) {
+ return current;
+}
+
+static inline struct task_struct *rthal_get_current (int cpuid) {
+ return current;
+}
+
+#endif /* !CONFIG_ADEOS_NOTHREADS */
+
+static inline void rthal_set_timer_shot (unsigned long delay) {
+
+ if (delay) {
+ set_dec(delay);
+ }
+}
+
+ /* Private interface -- Internal use only */
+
+unsigned long rthal_critical_enter(void (*synch)(void));
+
+void rthal_critical_exit(unsigned long flags);
+
+/* The following must be in sync w/ rthal_switch_context() in
+ switch.S */
+#define RTHAL_SWITCH_FRAME_SIZE 224
+
+void rthal_switch_context(unsigned long *out_kspp,
+ unsigned long *in_kspp);
+
+#ifdef CONFIG_RTAI_HW_FPU
+
+typedef struct rthal_fpenv {
+
+    /* This layout must follow exactly the definition of the FPU
+       backup area in a PPC thread struct available from
+       <asm-ppc/processor.h>. Specifically, the fpr[] and fpscr words
+       must be contiguous in memory (see arch/ppc/hal/fpu.S). */
+
+ double fpr[32];
+ unsigned long fpscr; /* mffs uses 64-bit (pad in hi/fpscr in lo) */
+} rthal_fpenv_t;
+
+void rthal_init_fpu(rthal_fpenv_t *fpuenv);
+
+void rthal_save_fpu(rthal_fpenv_t *fpuenv);
+
+void rthal_restore_fpu(rthal_fpenv_t *fpuenv);
+
+#ifndef CONFIG_SMP
+#define rthal_get_fpu_owner(cur) last_task_used_math
+#else /* CONFIG_SMP */
+#define rthal_get_fpu_owner(cur) ({ \
+ struct task_struct * _cur = (cur); \
+ ((_cur->thread.regs && (_cur->thread.regs->msr & MSR_FP)) \
+ ? _cur : NULL); \
+})
+#endif /* CONFIG_SMP */
+
+#define rthal_disable_fpu() ({ \
+ register unsigned long _msr; \
+ __asm__ __volatile__ ( "mfmsr %0" : "=r"(_msr) ); \
+ __asm__ __volatile__ ( "mtmsrd %0" \
+ : /* no output */ \
+ : "r"(_msr & ~(MSR_FP)) \
+ : "memory" ); \
+})
+
+#define rthal_enable_fpu() ({ \
+ register unsigned long _msr; \
+ __asm__ __volatile__ ( "mfmsr %0" : "=r"(_msr) ); \
+ __asm__ __volatile__ ( "mtmsrd %0" \
+ : /* no output */ \
+ : "r"(_msr | MSR_FP) \
+ : "memory" ); \
+})
+
+#endif /* CONFIG_RTAI_HW_FPU */
+
+#endif /* __KERNEL__ && !__cplusplus */
+
+ /* Public interface */
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+
+typedef int (*rthal_trap_handler_t)(adevinfo_t *evinfo);
+
+#define rthal_printk printk /* This is safe over Adeos */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+int rthal_request_irq(unsigned irq,
+ void (*handler)(unsigned irq, void *cookie),
+ void *cookie);
+
+int rthal_release_irq(unsigned irq);
+
+/**
+ * @name Programmable Interrupt Controllers (PIC) management functions.
+ *
+ *@{*/
+
+int rthal_enable_irq(unsigned irq);
+
+int rthal_disable_irq(unsigned irq);
+
+/*@}*/
+
+int rthal_request_linux_irq(unsigned irq,
+ irqreturn_t (*handler)(int irq,
+ void *dev_id,
+ struct pt_regs *regs),
+ char *name,
+ void *dev_id);
+
+int rthal_release_linux_irq(unsigned irq,
+ void *dev_id);
+
+int rthal_pend_linux_irq(unsigned irq);
+
+int rthal_pend_linux_srq(unsigned srq);
+
+int rthal_request_srq(unsigned label,
+ void (*handler)(void));
+
+int rthal_release_srq(unsigned srq);
+
+int rthal_set_irq_affinity(unsigned irq,
+ cpumask_t cpumask,
+ cpumask_t *oldmask);
+
+int rthal_request_timer(void (*handler)(void),
+ unsigned long nstick);
+
+void rthal_release_timer(void);
+
+rthal_trap_handler_t rthal_set_trap_handler(rthal_trap_handler_t handler);
+
+unsigned long rthal_calibrate_timer(void);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __KERNEL__ */
+
+/*@}*/
+
+#endif /* !_RTAI_ASM_PPC64_HAL_H */
diff -Nru fusion-0.7.1/include/nucleus/asm-ppc64/syscall.h fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/syscall.h
--- fusion-0.7.1/include/nucleus/asm-ppc64/syscall.h 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/syscall.h 2005-06-01 21:28:23.000000000 +0300
@@ -0,0 +1,181 @@
+/*
+ * RTAI/fusion 64-bit PowerPC adaptation
+ * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ * based on previous work:
+ *
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <[EMAIL PROTECTED]>.
+ *
+ * RTAI/fusion is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * RTAI/fusion is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with RTAI/fusion; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _RTAI_ASM_PPC64_SYSCALL_H
+#define _RTAI_ASM_PPC64_SYSCALL_H
+
+#include <rtai_config.h>
+#include <asm/ptrace.h>
+
+/*
+ * Some of the following macros have been adapted from Linux's
+ * implementation of the syscall mechanism in <asm-ppc64/unistd.h>:
+ *
+ * The following code defines an inline syscall mechanism used by
+ * RTAI/fusion's real-time interfaces to invoke the skin module
+ * services in kernel space.
+ */
+
+#define XENOMAI_DO_SYSCALL(nr, id, op, args...) \
+ ({ \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ \
+ LOADARGS_##nr(__xn_mux_code(id,op), args); \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %0 " \
+ : "=&r" (__sc_0), \
+ "=&r" (__sc_3), "=&r" (__sc_4), \
+ "=&r" (__sc_5), "=&r" (__sc_6), \
+ "=&r" (__sc_7) \
+ : ASM_INPUT_##nr \
+ : "cr0", "ctr", "memory", \
+ "r8", "r9", "r10","r11", "r12"); \
+ (int)((__sc_0 & (1 << 28)) ? -__sc_3 : __sc_3); \
+ })
+
+#define LOADARGS_0(muxcode, dummy...) \
+ __sc_0 = muxcode
+#define LOADARGS_1(muxcode, arg1) \
+ LOADARGS_0(muxcode); \
+ __sc_3 = (unsigned long) (arg1)
+#define LOADARGS_2(muxcode, arg1, arg2) \
+ LOADARGS_1(muxcode, arg1); \
+ __sc_4 = (unsigned long) (arg2)
+#define LOADARGS_3(muxcode, arg1, arg2, arg3) \
+ LOADARGS_2(muxcode, arg1, arg2); \
+ __sc_5 = (unsigned long) (arg3)
+#define LOADARGS_4(muxcode, arg1, arg2, arg3, arg4) \
+ LOADARGS_3(muxcode, arg1, arg2, arg3); \
+ __sc_6 = (unsigned long) (arg4)
+#define LOADARGS_5(muxcode, arg1, arg2, arg3, arg4, arg5) \
+ LOADARGS_4(muxcode, arg1, arg2, arg3, arg4); \
+ __sc_7 = (unsigned long) (arg5)
+
+#define ASM_INPUT_0 "0" (__sc_0)
+#define ASM_INPUT_1 ASM_INPUT_0, "1" (__sc_3)
+#define ASM_INPUT_2 ASM_INPUT_1, "2" (__sc_4)
+#define ASM_INPUT_3 ASM_INPUT_2, "3" (__sc_5)
+#define ASM_INPUT_4 ASM_INPUT_3, "4" (__sc_6)
+#define ASM_INPUT_5 ASM_INPUT_4, "5" (__sc_7)
+
+/* Register mapping for accessing syscall args. */
+
+#define __xn_reg_mux(regs) ((regs)->gpr[0])
+#define __xn_reg_rval(regs) ((regs)->gpr[3])
+#define __xn_reg_arg1(regs) ((regs)->gpr[3])
+#define __xn_reg_arg2(regs) ((regs)->gpr[4])
+#define __xn_reg_arg3(regs) ((regs)->gpr[5])
+#define __xn_reg_arg4(regs) ((regs)->gpr[6])
+#define __xn_reg_arg5(regs) ((regs)->gpr[7])
+
+#define __xn_reg_mux_p(regs)  ((__xn_reg_mux(regs) & 0xffff) == __xn_sys_mux)
+#define __xn_mux_id(regs)     ((__xn_reg_mux(regs) >> 16) & 0xff)
+#define __xn_mux_op(regs)     ((__xn_reg_mux(regs) >> 24) & 0xff)
+#define __xn_mux_code(id,op)  ((op << 24)|((id << 16) & 0xff0000)|(__xn_sys_mux & 0xffff))
+
+#define XENOMAI_SYSCALL0(op) XENOMAI_DO_SYSCALL(0,0,op)
+#define XENOMAI_SYSCALL1(op,a1) XENOMAI_DO_SYSCALL(1,0,op,a1)
+#define XENOMAI_SYSCALL2(op,a1,a2) XENOMAI_DO_SYSCALL(2,0,op,a1,a2)
+#define XENOMAI_SYSCALL3(op,a1,a2,a3) XENOMAI_DO_SYSCALL(3,0,op,a1,a2,a3)
+#define XENOMAI_SYSCALL4(op,a1,a2,a3,a4)    XENOMAI_DO_SYSCALL(4,0,op,a1,a2,a3,a4)
+#define XENOMAI_SYSCALL5(op,a1,a2,a3,a4,a5) XENOMAI_DO_SYSCALL(5,0,op,a1,a2,a3,a4,a5)
+
+#define XENOMAI_SKINCALL0(id,op) XENOMAI_DO_SYSCALL(0,id,op)
+#define XENOMAI_SKINCALL1(id,op,a1) XENOMAI_DO_SYSCALL(1,id,op,a1)
+#define XENOMAI_SKINCALL2(id,op,a1,a2)          XENOMAI_DO_SYSCALL(2,id,op,a1,a2)
+#define XENOMAI_SKINCALL3(id,op,a1,a2,a3)       XENOMAI_DO_SYSCALL(3,id,op,a1,a2,a3)
+#define XENOMAI_SKINCALL4(id,op,a1,a2,a3,a4)    XENOMAI_DO_SYSCALL(4,id,op,a1,a2,a3,a4)
+#define XENOMAI_SKINCALL5(id,op,a1,a2,a3,a4,a5) XENOMAI_DO_SYSCALL(5,id,op,a1,a2,a3,a4,a5)
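
A skin's user-space library builds its entry points on these macros. A
minimal sketch, where the mux id MYSKIN_MUX_ID and the op code
__myskin_create are made-up names:

    /* Hypothetical wrapper: forwards two arguments to a skin service
       living in kernel space and returns its status. */
    int myskin_create(void *desc, unsigned long flags)
    {
        return XENOMAI_SKINCALL2(MYSKIN_MUX_ID, __myskin_create,
                                 desc, flags);
    }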
+
+#ifdef __KERNEL__
+
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+
+/* Our own set of copy-to/from-user macros which must bypass
+   might_sleep() checks. The caller cannot fault and is expected to
+   have checked for a bad range before using the copy macros, so we
+   should not have to care about the result. The apparently
+   preposterous do-while bracketing just helps silence GCC's
+   "warn_unused_result" attribute on the related kernel macros. */
+#define __xn_copy_from_user(task,dstP,srcP,n) \
+do { \
+ if (__copy_from_user_inatomic(dstP,srcP,n)) \
+ ; \
+} while(0)
+#define __xn_copy_to_user(task,dstP,srcP,n) \
+do { \
+ if (__copy_to_user_inatomic(dstP,srcP,n)) \
+ ; \
+} while(0)
+#define __xn_put_user(task,src,dstP) __put_user(src,dstP)
+#define __xn_get_user(task,dst,srcP) __get_user(dst,srcP)
+
+#define __xn_range_ok(task,addr,size) \
+ __access_ok(((__force unsigned long)(addr)),(size),(task->thread.fs))
+
+#define __xn_access_ok(task,type,addr,size) __xn_range_ok(task,addr,size)
+
+/* We deliberately use inlines rather than macros for the following
+   routines so that we don't risk spurious side-effects on the value arg. */
+
+static inline void __xn_success_return(struct pt_regs *regs, int v) {
+ __xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_error_return(struct pt_regs *regs, int v) {
+ /* We currently never set the SO bit for marking errors, even if
+ * we always test it upon syscall return. */
+ __xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, int v) {
+ __xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs) {
+ return __xn_reg_rval(regs) == -EINTR;
+}
+
+#else /* !__KERNEL__ */
+
+#define CONFIG_RTAI_HW_DIRECT_TSC 1
+
+static inline unsigned long long __xn_rdtsc (void)
+
+{
+ unsigned long long t;
+
+ __asm__ __volatile__ ("mftb %0\n" : "=r" (t));
+ return t;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* !_RTAI_ASM_PPC64_SYSCALL_H */
diff -Nru fusion-0.7.1/include/nucleus/asm-ppc64/system.h fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/system.h
--- fusion-0.7.1/include/nucleus/asm-ppc64/system.h 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/system.h 2005-06-01 16:28:18.000000000 +0300
@@ -0,0 +1,968 @@
+/*
+ * RTAI/fusion 64-bit PowerPC adaptation
+ * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ * based on previous work:
+ *
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <[EMAIL PROTECTED]>.
+ *
+ * RTAI/fusion is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * RTAI/fusion is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with RTAI/fusion; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _RTAI_ASM_PPC64_SYSTEM_H
+#define _RTAI_ASM_PPC64_SYSTEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/adeos.h>
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+#include <asm/param.h>
+#include <asm/mmu_context.h>
+#include <rtai_config.h>
+#include <nucleus/asm/hal.h>
+#include <nucleus/asm/atomic.h>
+#include <nucleus/shadow.h>
+
+#if ADEOS_RELEASE_NUMBER < 0x0206060f
+#error "Adeos 2.6r6c15/ppc or above is required to run this software; please
upgrade."
+#error "See http://download.gna.org/adeos/patches/v2.6/ppc/"
+#endif
+
+#define module_param_value(parm) (parm)
+
+typedef unsigned long spl_t;
+
+#define splhigh(x) rthal_local_irq_save(x)
+#ifdef CONFIG_SMP
+#define splexit(x) rthal_local_irq_restore((x) & 1)
+#else /* !CONFIG_SMP */
+#define splexit(x) rthal_local_irq_restore(x)
+#endif /* CONFIG_SMP */
+#define splnone() rthal_sti()
+#define spltest() rthal_local_irq_test()
+#define splget(x) rthal_local_irq_flags(x)
+#define splsync(x) rthal_local_irq_sync(x)
+
+typedef unsigned long xnlock_t;
+
+#define XNARCH_LOCK_UNLOCKED 0
+
+#ifdef CONFIG_SMP
+
+#define xnlock_get_irqsave(lock,x) ((x) = __xnlock_get_irqsave(lock))
+#define xnlock_clear_irqoff(lock) xnlock_put_irqrestore(lock,1)
+#define xnlock_clear_irqon(lock) xnlock_put_irqrestore(lock,0)
+
+static inline void xnlock_init (xnlock_t *lock) {
+
+ *lock = XNARCH_LOCK_UNLOCKED;
+}
+
+static inline spl_t __xnlock_get_irqsave (xnlock_t *lock)
+
+{
+ adeos_declare_cpuid;
+ spl_t flags;
+
+ rthal_local_irq_save(flags);
+
+ adeos_load_cpuid();
+
+ if (!test_and_set_bit(cpuid,lock))
+ {
+ while (test_and_set_bit(BITS_PER_LONG - 1,lock))
+	    /* Use a non-locking test in the inner loop, as Linux's
+	       bit_spin_lock does. */
+ while (test_bit(BITS_PER_LONG - 1, lock))
+ cpu_relax();
+ }
+ else
+ flags |= 2;
+
+ return flags;
+}
+
+static inline void xnlock_put_irqrestore (xnlock_t *lock, spl_t flags)
+
+{
+ if (!(flags & 2))
+ {
+ adeos_declare_cpuid;
+
+ rthal_cli();
+
+ adeos_load_cpuid();
+
+ if (test_bit(cpuid,lock))
+ {
+ clear_bit(cpuid,lock);
+ clear_bit(BITS_PER_LONG - 1,lock);
+ }
+ }
+
+ rthal_local_irq_restore(flags & 1);
+}
+
+#define XNARCH_PASSTHROUGH_IRQS /*empty*/
+
+#else /* !CONFIG_SMP */
+
+#define xnlock_init(lock) do { } while(0)
+#define xnlock_get_irqsave(lock,x) rthal_local_irq_save(x)
+#define xnlock_put_irqrestore(lock,x) rthal_local_irq_restore(x)
+#define xnlock_clear_irqoff(lock) rthal_cli()
+#define xnlock_clear_irqon(lock) rthal_sti()
+
+#endif /* CONFIG_SMP */
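
Either way, callers pair the two operations like this (a sketch; the
lock and the protected state are hypothetical):

    static xnlock_t my_lock = XNARCH_LOCK_UNLOCKED;

    static void update_shared_state(void)
    {
        spl_t s;

        xnlock_get_irqsave(&my_lock, s);
        /* ... serialized against all CPUs, stage stalled ... */
        xnlock_put_irqrestore(&my_lock, s);
    }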
+
+#define XNARCH_NR_CPUS RTHAL_NR_CPUS
+
+#define XNARCH_DEFAULT_TICK 1000000 /* ns, i.e. 1ms */
+#define XNARCH_HOST_TICK (1000000000UL/HZ)
+
+#define XNARCH_THREAD_STACKSZ 16384
+#define XNARCH_ROOT_STACKSZ 0 /* Only a placeholder -- no stack */
+
+#define XNARCH_PROMPT "RTAI: "
+#define xnarch_loginfo(fmt,args...)  printk(KERN_INFO XNARCH_PROMPT fmt, ##args)
+#define xnarch_logwarn(fmt,args...)  printk(KERN_WARNING XNARCH_PROMPT fmt, ##args)
+#define xnarch_logerr(fmt,args...)   printk(KERN_ERR XNARCH_PROMPT fmt, ##args)
+#define xnarch_printf(fmt,args...)   printk(KERN_INFO XNARCH_PROMPT fmt, ##args)
+
+#define xnarch_ullmod(ull,uld,rem) ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
+#define xnarch_uldiv(ull, d) rthal_uldivrem(ull, d, NULL)
+#define xnarch_ulmod(ull, d) ({ u_long _rem; \
+ rthal_uldivrem(ull,d,&_rem); _rem; })
+
+#define xnarch_ullmul rthal_ullmul
+#define xnarch_uldivrem rthal_uldivrem
+#define xnarch_ulldiv rthal_ulldiv
+#define xnarch_imuldiv rthal_imuldiv
+#define xnarch_llimd rthal_llimd
+#define xnarch_get_cpu_tsc rthal_rdtsc
+
+typedef cpumask_t xnarch_cpumask_t;
+#ifdef CONFIG_SMP
+#define xnarch_cpu_online_map cpu_online_map
+#else
+#define xnarch_cpu_online_map cpumask_of_cpu(0)
+#endif
+#define xnarch_num_online_cpus() num_online_cpus()
+#define xnarch_cpu_set(cpu, mask) cpu_set(cpu, mask)
+#define xnarch_cpu_clear(cpu, mask) cpu_clear(cpu, mask)
+#define xnarch_cpus_clear(mask) cpus_clear(mask)
+#define xnarch_cpu_isset(cpu, mask) cpu_isset(cpu, mask)
+#define xnarch_cpus_and(dst, src1, src2) cpus_and(dst, src1, src2)
+#define xnarch_cpus_equal(mask1, mask2) cpus_equal(mask1, mask2)
+#define xnarch_cpus_empty(mask) cpus_empty(mask)
+#define xnarch_cpumask_of_cpu(cpu) cpumask_of_cpu(cpu)
+#define xnarch_first_cpu(mask) first_cpu(mask)
+#define XNARCH_CPU_MASK_ALL CPU_MASK_ALL
+
+struct xnthread;
+struct xnheap;
+struct task_struct;
+
+#define xnarch_stack_size(tcb) ((tcb)->stacksize)
+#define xnarch_user_task(tcb) ((tcb)->user_task)
+
+typedef struct xnarchtcb { /* Per-thread arch-dependent block */
+
+ /* Kernel mode side */
+
+#ifdef CONFIG_RTAI_HW_FPU
+ /* We only care for basic FPU handling in kernel-space; Altivec
+ and SPE are not available to kernel-based nucleus threads. */
+ rthal_fpenv_t fpuenv __attribute__ ((aligned (16)));
+ rthal_fpenv_t *fpup; /* Pointer to the FPU backup area */
+ struct task_struct *user_fpu_owner;
+    /* Pointer to the FPU owner in userspace:
+       - NULL for RT K threads,
+       - last_task_used_math for Linux US threads (only current or NULL when MP)
+       - current for RT US threads.
+    */
+#define xnarch_fpu_ptr(tcb) ((tcb)->fpup)
+#else /* !CONFIG_RTAI_HW_FPU */
+#define xnarch_fpu_ptr(tcb) NULL
+#endif /* CONFIG_RTAI_HW_FPU */
+
+ unsigned stacksize; /* Aligned size of stack (bytes) */
+ unsigned long *stackbase; /* Stack space */
+ unsigned long ksp; /* Saved KSP for kernel-based threads */
+ unsigned long ksp_vsid;
+    unsigned long *kspp;       /* Pointer to saved KSP (&ksp or &user->thread.ksp) */
+
+ /* User mode side */
+ struct task_struct *user_task; /* Shadowed user-space task */
+ struct task_struct *active_task; /* Active user-space task */
+
+ /* Init block */
+ struct xnthread *self;
+ int imask;
+ const char *name;
+ void (*entry)(void *cookie);
+ void *cookie;
+
+} xnarchtcb_t;
+
+typedef struct xnarch_fltinfo {
+
+ struct pt_regs *regs;
+
+} xnarch_fltinfo_t;
+
+#define xnarch_fault_trap(fi) ((unsigned int)(fi)->regs->trap)
+#define xnarch_fault_code(fi) ((fi)->regs->dar)
+#define xnarch_fault_pc(fi) ((fi)->regs->nip)
+
+typedef struct xnarch_heapcb {
+
+ atomic_t numaps; /* # of active user-space mappings. */
+
+ int kmflags; /* Kernel memory flags (0 if vmalloc()). */
+
+ void *heapbase; /* Shared heap memory base. */
+
+} xnarch_heapcb_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline long long xnarch_tsc_to_ns (long long ts) {
+ return xnarch_llimd(ts,1000000000,RTHAL_CPU_FREQ);
+}
+
+static inline long long xnarch_ns_to_tsc (long long ns) {
+ return xnarch_llimd(ns,RTHAL_CPU_FREQ,1000000000);
+}
+
+static inline unsigned long long xnarch_get_cpu_time (void) {
+ return xnarch_tsc_to_ns(xnarch_get_cpu_tsc());
+}
+
+static inline unsigned long long xnarch_get_cpu_freq (void) {
+ return RTHAL_CPU_FREQ;
+}
+
+static inline unsigned xnarch_current_cpu (void) {
+ return adeos_processor_id();
+}
+
+static inline void *xnarch_sysalloc (u_long bytes)
+
+{
+#if 0 /* FIXME: likely on-demand mapping bug here */
+ if (bytes >= 128*1024)
+ return vmalloc(bytes);
+#endif
+
+ return kmalloc(bytes,GFP_KERNEL);
+}
+
+static inline void xnarch_sysfree (void *chunk, u_long bytes)
+
+{
+#if 0 /* FIXME: likely on-demand mapping bug here */
+ if (bytes >= 128*1024)
+ vfree(chunk);
+ else
+#endif
+ kfree(chunk);
+}
+
+#define xnarch_declare_cpuid adeos_declare_cpuid
+#define xnarch_get_cpu(flags) adeos_get_cpu(flags)
+#define xnarch_put_cpu(flags) adeos_put_cpu(flags)
+
+#define xnarch_halt(emsg) \
+do { \
+ adeos_set_printk_sync(adp_current); \
+ xnarch_logerr("fatal: %s\n",emsg); \
+ show_stack(NULL,NULL); \
+ for (;;) ; \
+} while(0)
+
+#define xnarch_alloc_stack xnmalloc
+#define xnarch_free_stack xnfree
+
+static inline int xnarch_setimask (int imask)
+
+{
+ spl_t s;
+ splhigh(s);
+ splexit(!!imask);
+ return !!s;
+}
+
+#ifdef XENO_INTR_MODULE
+
+static inline int xnarch_hook_irq (unsigned irq,
+ void (*handler)(unsigned irq,
+ void *cookie),
+ void *cookie)
+{
+ return rthal_request_irq(irq,handler,cookie);
+}
+
+static inline int xnarch_release_irq (unsigned irq) {
+
+ return rthal_release_irq(irq);
+}
+
+static inline int xnarch_enable_irq (unsigned irq)
+
+{
+ return rthal_enable_irq(irq);
+}
+
+static inline int xnarch_disable_irq (unsigned irq)
+
+{
+ return rthal_disable_irq(irq);
+}
+
+static inline void xnarch_chain_irq (unsigned irq)
+
+{
+ rthal_pend_linux_irq(irq);
+}
+
+static inline void xnarch_relay_tick (void)
+
+{
+ rthal_pend_linux_irq(ADEOS_TIMER_VIRQ);
+}
+
+static inline cpumask_t xnarch_set_irq_affinity (unsigned irq,
+ xnarch_cpumask_t affinity)
+{
+ return adeos_set_irq_affinity(irq,affinity);
+}
+
+#endif /* XENO_INTR_MODULE */
+
+#ifdef XENO_POD_MODULE
+
+void xnpod_welcome_thread(struct xnthread *);
+
+void xnpod_delete_thread(struct xnthread *);
+
+static inline int xnarch_start_timer (unsigned long ns,
+ void (*tickhandler)(void))
+{
+ return rthal_request_timer(tickhandler,ns);
+}
+
+static inline void xnarch_leave_root (xnarchtcb_t *rootcb)
+
+{
+ adeos_declare_cpuid;
+
+ adeos_load_cpuid();
+
+ /* rthal_cpu_realtime is only tested for the current processor,
+ and always inside a critical section. */
+ __set_bit(cpuid,&rthal_cpu_realtime);
+ /* Remember the preempted Linux task pointer. */
+ rootcb->user_task = rootcb->active_task = rthal_get_current(cpuid);
+#ifdef CONFIG_RTAI_HW_FPU
+ rootcb->user_fpu_owner = rthal_get_fpu_owner(rootcb->user_task);
+ /* So that xnarch_save_fpu() will operate on the right FPU area. */
+ rootcb->fpup = (rootcb->user_fpu_owner
+ ? (rthal_fpenv_t *)&rootcb->user_fpu_owner->thread.fpr[0]
+ : NULL);
+#endif /* CONFIG_RTAI_HW_FPU */
+}
+
+static inline void xnarch_enter_root (xnarchtcb_t *rootcb) {
+ __clear_bit(xnarch_current_cpu(),&rthal_cpu_realtime);
+}
+
+/* from asm/mmu_context.h */
+static inline void xnarch_switch_to (xnarchtcb_t *out_tcb,
+ xnarchtcb_t *in_tcb)
+{
+ struct task_struct *prev = out_tcb->active_task;
+ struct task_struct *next = in_tcb->user_task;
+
+ in_tcb->active_task = next ?: prev;
+
+ if (next && next != prev) /* Switch to new user-space thread? */
+ {
+ struct mm_struct *mm = next->active_mm;
+
+ /* Switch the mm context.*/
+
+#ifdef CONFIG_ALTIVEC
+ /* Don't rely on FTR fixups --
+ they don't work properly in our context. */
+	if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC) {
+ asm volatile (
+ "dssall;\n"
+ : : );
+ }
+#endif /* CONFIG_ALTIVEC */
+
+ if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+ cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+ }
+
+ if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
+ switch_slb(next, mm);
+ }
+ else {
+ switch_stab(next, mm);
+ }
+
+ flush_tlb_pending();
+
+ _switch(&prev->thread, &next->thread);
+
+ barrier();
+ }
+ else {
+ /* Kernel-to-kernel context switch. */
+ rthal_switch_context(out_tcb->kspp,in_tcb->kspp);
+ }
+}
+
+static inline void xnarch_finalize_and_switch (xnarchtcb_t *dead_tcb,
+ xnarchtcb_t *next_tcb)
+{
+ xnarch_switch_to(dead_tcb,next_tcb);
+}
+
+static inline void xnarch_finalize_no_switch (xnarchtcb_t *dead_tcb)
+
+{
+ /* Empty */
+}
+
+static inline void xnarch_init_root_tcb (xnarchtcb_t *tcb,
+ struct xnthread *thread,
+ const char *name)
+{
+ tcb->user_task = current;
+ tcb->active_task = NULL;
+ tcb->ksp = 0;
+ tcb->kspp = &tcb->ksp;
+#ifdef CONFIG_RTAI_HW_FPU
+ tcb->user_fpu_owner = NULL;
+ tcb->fpup = NULL;
+#endif /* CONFIG_RTAI_HW_FPU */
+ tcb->entry = NULL;
+ tcb->cookie = NULL;
+ tcb->self = thread;
+ tcb->imask = 0;
+ tcb->name = name;
+}
+
+asmlinkage static void xnarch_thread_trampoline (xnarchtcb_t *tcb)
+
+{
+ rthal_local_irq_restore(!!tcb->imask);
+ xnpod_welcome_thread(tcb->self);
+ tcb->entry(tcb->cookie);
+ xnpod_delete_thread(tcb->self);
+}
+
+static inline void xnarch_init_thread (xnarchtcb_t *tcb,
+ void (*entry)(void *),
+ void *cookie,
+ int imask,
+ struct xnthread *thread,
+ char *name)
+{
+ unsigned long *ksp, flags;
+
+ adeos_hw_local_irq_flags(flags);
+
+ *tcb->stackbase = 0;
+    ksp = (unsigned long *)((((unsigned long)tcb->stackbase + tcb->stacksize - 0x10) & ~0xf) - RTHAL_SWITCH_FRAME_SIZE);
+ tcb->ksp = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
+ ksp[19] = (unsigned long)tcb; /* r3 */
+ ksp[25] = (unsigned long)&xnarch_thread_trampoline; /* lr */
+ ksp[26] = flags & ~(MSR_EE | MSR_FP); /* msr */
+
+ tcb->entry = entry;
+ tcb->cookie = cookie;
+ tcb->self = thread;
+ tcb->imask = imask;
+ tcb->name = name;
+}
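
The magic slot indices above (19 for the r3 argument, 25 for the
trampoline LR, 26 for the MSR image) and RTHAL_SWITCH_FRAME_SIZE (224
bytes, i.e. 28 doubleword slots) must stay in sync with the save/restore
order in switch.S. A compile-time tripwire along these lines (a sketch;
the typedef name is hypothetical) would catch accidental drift:

    /* Fails to compile if the switch frame stops being 28 doublewords. */
    typedef char rthal_switch_frame_check
        [(RTHAL_SWITCH_FRAME_SIZE == 28 * sizeof(unsigned long)) ? 1 : -1];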
+
+static inline void xnarch_enable_fpu (xnarchtcb_t *current_tcb)
+
+{
+#ifdef CONFIG_RTAI_HW_FPU
+ if(!current_tcb->user_task)
+ rthal_enable_fpu();
+#endif /* CONFIG_RTAI_HW_FPU */
+}
+
+static inline void xnarch_init_fpu (xnarchtcb_t *tcb)
+
+{
+#ifdef CONFIG_RTAI_HW_FPU
+ /* Initialize the FPU for an emerging kernel-based RT thread. This
+ must be run on behalf of the emerging thread. */
+ memset(&tcb->fpuenv,0,sizeof(tcb->fpuenv));
+ rthal_init_fpu(&tcb->fpuenv);
+#endif /* CONFIG_RTAI_HW_FPU */
+}
+
+static inline void xnarch_save_fpu (xnarchtcb_t *tcb)
+
+{
+#ifdef CONFIG_RTAI_HW_FPU
+ if(tcb->fpup)
+ {
+ rthal_save_fpu(tcb->fpup);
+
+ if(tcb->user_fpu_owner && tcb->user_fpu_owner->thread.regs)
+ tcb->user_fpu_owner->thread.regs->msr &= ~MSR_FP;
+ }
+
+#endif /* CONFIG_RTAI_HW_FPU */
+}
+
+static inline void xnarch_restore_fpu (xnarchtcb_t *tcb)
+
+{
+#ifdef CONFIG_RTAI_HW_FPU
+ if(tcb->fpup)
+ {
+ rthal_restore_fpu(tcb->fpup);
+
+ if(tcb->user_fpu_owner && tcb->user_fpu_owner->thread.regs)
+ tcb->user_fpu_owner->thread.regs->msr |= MSR_FP;
+ }
+
+ /* FIXME: We restore FPU "as it was" when RTAI preempted Linux, whereas we
+ could be much lazier. */
+ if(tcb->user_task)
+ rthal_disable_fpu();
+
+#endif /* CONFIG_RTAI_HW_FPU */
+}
+
+#ifdef CONFIG_SMP
+
+static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask) {
+
+ return adeos_send_ipi(ADEOS_SERVICE_IPI0, cpumask);
+}
+
+static inline int xnarch_hook_ipi (void (*handler)(void))
+
+{
+ return adeos_virtualize_irq_from(&rthal_domain,
+ ADEOS_SERVICE_IPI0,
+ (void (*)(unsigned)) handler,
+ NULL,
+ IPIPE_HANDLE_MASK);
+}
+
+static inline int xnarch_release_ipi (void)
+
+{
+ return adeos_virtualize_irq_from(&rthal_domain,
+ ADEOS_SERVICE_IPI0,
+ NULL,
+ NULL,
+ IPIPE_PASS_MASK);
+}
+
+static inline void xnarch_notify_halt(void)
+
+{
+ unsigned long flags = adeos_critical_enter(NULL);
+ adeos_critical_exit(flags);
+}
+
+#else /* !CONFIG_SMP */
+
+static inline int xnarch_send_ipi (xnarch_cpumask_t cpumask)
+
+{
+ return 0;
+}
+
+static inline int xnarch_hook_ipi (void (*handler)(void))
+
+{
+ return 0;
+}
+
+static inline int xnarch_release_ipi (void)
+
+{
+ return 0;
+}
+
+#define xnarch_notify_halt() /* Nullified */
+
+#endif /* CONFIG_SMP */
+
+static inline void xnarch_notify_shutdown(void)
+
+{
+#ifdef CONFIG_SMP
+ /* The HAL layer also sets the same CPU affinity so that both
+ modules keep their execution sequence on SMP boxen. */
+ set_cpus_allowed(current,cpumask_of_cpu(0));
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_RTAI_OPT_FUSION
+ xnshadow_release_events();
+#endif /* CONFIG_RTAI_OPT_FUSION */
+ /* Wait for the currently processed events to drain. */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(50);
+ xnarch_release_ipi();
+}
+
+static inline int xnarch_escalate (void)
+
+{
+ extern int xnarch_escalation_virq;
+
+ if (adp_current == adp_root)
+ {
+ spl_t s;
+ splsync(s);
+ adeos_trigger_irq(xnarch_escalation_virq);
+ splexit(s);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void xnarch_notify_ready (void)
+
+{
+#ifdef CONFIG_RTAI_OPT_FUSION
+ xnshadow_grab_events();
+#endif /* CONFIG_RTAI_OPT_FUSION */
+}
+
+#endif /* XENO_POD_MODULE */
+
+#ifdef XENO_THREAD_MODULE
+
+static inline void xnarch_init_tcb (xnarchtcb_t *tcb) {
+
+ tcb->user_task = NULL;
+ tcb->active_task = NULL;
+ tcb->kspp = &tcb->ksp;
+#ifdef CONFIG_RTAI_HW_FPU
+ tcb->user_fpu_owner = NULL;
+ tcb->fpup = &tcb->fpuenv;
+#endif /* CONFIG_RTAI_HW_FPU */
+ /* Must be followed by xnarch_init_thread(). */
+}
+
+#endif /* XENO_THREAD_MODULE */
+
+#ifdef XENO_SHADOW_MODULE
+
+static inline void xnarch_init_shadow_tcb (xnarchtcb_t *tcb,
+ struct xnthread *thread,
+ const char *name)
+{
+ struct task_struct *task = current;
+
+ tcb->user_task = task;
+ tcb->active_task = NULL;
+ tcb->ksp = 0;
+ tcb->kspp = &task->thread.ksp;
+#ifdef CONFIG_RTAI_HW_FPU
+ tcb->user_fpu_owner = task;
+ tcb->fpup = (rthal_fpenv_t *)&task->thread.fpr[0];
+#endif /* CONFIG_RTAI_HW_FPU */
+ tcb->entry = NULL;
+ tcb->cookie = NULL;
+ tcb->self = thread;
+ tcb->imask = 0;
+ tcb->name = name;
+}
+
+static inline void xnarch_grab_xirqs (void (*handler)(unsigned irq))
+
+{
+ unsigned irq;
+
+ for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
+ adeos_virtualize_irq(irq,
+ handler,
+ NULL,
+ IPIPE_DYNAMIC_MASK);
+
+ /* On this arch, the decrementer trap is not an external IRQ but
+ it is instead mapped to a virtual IRQ, so we must grab it
+ individually. */
+
+ adeos_virtualize_irq(ADEOS_TIMER_VIRQ,
+ handler,
+ NULL,
+ IPIPE_DYNAMIC_MASK);
+}
+
+static inline void xnarch_lock_xirqs (adomain_t *adp, int cpuid)
+
+{
+ unsigned irq;
+
+ for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
+ {
+ switch (irq)
+ {
+#ifdef CONFIG_SMP
+ case ADEOS_CRITICAL_IPI:
+
+ /* Never lock out this one. */
+ continue;
+#endif /* CONFIG_SMP */
+
+ default:
+
+ __adeos_lock_irq(adp,cpuid,irq);
+ }
+ }
+
+ __adeos_lock_irq(adp,cpuid,ADEOS_TIMER_VIRQ);
+}
+
+static inline void xnarch_unlock_xirqs (adomain_t *adp, int cpuid)
+
+{
+ unsigned irq;
+
+ for (irq = 0; irq < IPIPE_NR_XIRQS; irq++)
+ {
+ switch (irq)
+ {
+#ifdef CONFIG_SMP
+ case ADEOS_CRITICAL_IPI:
+
+ continue;
+#endif /* CONFIG_SMP */
+
+ default:
+
+ __adeos_unlock_irq(adp,irq);
+ }
+ }
+
+ __adeos_unlock_irq(adp,ADEOS_TIMER_VIRQ);
+}
+
+#endif /* XENO_SHADOW_MODULE */
+
+#ifdef XENO_TIMER_MODULE
+
+static inline void xnarch_program_timer_shot (unsigned long delay) {
+    /* Even though some architectures may use a 64-bit delay here, we
+       voluntarily limit it to 32 bits; 4 billion ticks should be enough
+       for now. If a timer needs more, a spurious but harmless call to
+       the tick handler will occur after 4 billion ticks. Since the
+       timebase value is used to express CPU ticks on the PowerPC
+       port, there is no need to rescale the delay value. */
+ rthal_set_timer_shot(delay);
+}
+
+static inline void xnarch_stop_timer (void) {
+ rthal_release_timer();
+}
+
+static inline int xnarch_send_timer_ipi (xnarch_cpumask_t mask)
+
+{
+#ifdef CONFIG_SMP
+ return -1; /* FIXME */
+#else /* ! CONFIG_SMP */
+ return 0;
+#endif /* CONFIG_SMP */
+}
+
+static inline void xnarch_read_timings (unsigned long long *shot,
+ unsigned long long *delivery,
+ unsigned long long defval)
+{
+#ifdef CONFIG_ADEOS_PROFILING
+ int cpuid = adeos_processor_id();
+ *shot = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_handled;
+ *delivery = __adeos_profile_data[cpuid].irqs[__adeos_tick_irq].t_synced;
+#else /* !CONFIG_ADEOS_PROFILING */
+ *shot = defval;
+ *delivery = defval;
+#endif /* CONFIG_ADEOS_PROFILING */
+}
+
+#endif /* XENO_TIMER_MODULE */
+
+#ifdef XENO_HEAP_MODULE
+
+#include <linux/mm.h>
+
+static inline void xnarch_init_heapcb (xnarch_heapcb_t *hcb)
+
+{
+ atomic_set(&hcb->numaps,0);
+ hcb->kmflags = 0;
+ hcb->heapbase = NULL;
+}
+
+static inline int xnarch_remap_page_range(struct vm_area_struct *vma,
+ unsigned long uvaddr,
+ unsigned long paddr,
+ unsigned long size,
+ pgprot_t prot)
+{
+ return remap_pfn_range(vma,uvaddr,paddr >> PAGE_SHIFT,size,prot);
+}
+
+#endif /* XENO_HEAP_MODULE */
+
+#ifdef XENO_MAIN_MODULE
+
+#include <linux/init.h>
+#include <nucleus/asm/calibration.h>
+
+extern u_long nkschedlat;
+
+extern u_long nktimerlat;
+
+int xnarch_escalation_virq;
+
+int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo);
+
+void xnpod_schedule_handler(void);
+
+static rthal_trap_handler_t xnarch_old_trap_handler;
+
+static int xnarch_trap_fault (adevinfo_t *evinfo)
+
+{
+ xnarch_fltinfo_t fltinfo;
+ fltinfo.regs = (struct pt_regs *)evinfo->evdata;
+ return xnpod_trap_fault(&fltinfo);
+}
+
+unsigned long xnarch_calibrate_timer (void)
+
+{
+#if CONFIG_RTAI_HW_TIMER_LATENCY != 0
+ return xnarch_ns_to_tsc(CONFIG_RTAI_HW_TIMER_LATENCY) ?: 1;
+#else /* CONFIG_RTAI_HW_TIMER_LATENCY unspecified. */
+ /* Compute the time needed to program the decrementer in aperiodic
+ mode. The return value is expressed in timebase ticks. */
+ return xnarch_ns_to_tsc(rthal_calibrate_timer()) ?: 1;
+#endif /* CONFIG_RTAI_HW_TIMER_LATENCY != 0 */
+}
+
+int xnarch_calibrate_sched (void)
+
+{
+ nktimerlat = xnarch_calibrate_timer();
+
+ if (!nktimerlat)
+ return -ENODEV;
+
+ nkschedlat = xnarch_ns_to_tsc(xnarch_get_sched_latency());
+
+ return 0;
+}
+
+static inline int xnarch_init (void)
+
+{
+ int err;
+
+#ifdef CONFIG_SMP
+ /* The HAL layer also sets the same CPU affinity so that both
+ modules keep their execution sequence on SMP boxen. */
+ set_cpus_allowed(current,cpumask_of_cpu(0));
+#endif /* CONFIG_SMP */
+
+ err = xnarch_calibrate_sched();
+
+ if (err)
+ return err;
+
+ xnarch_escalation_virq = adeos_alloc_irq();
+
+ if (xnarch_escalation_virq == 0)
+ return -ENOSYS;
+
+ adeos_virtualize_irq_from(&rthal_domain,
+ xnarch_escalation_virq,
+ (void (*)(unsigned))&xnpod_schedule_handler,
+ NULL,
+ IPIPE_HANDLE_MASK);
+
+ xnarch_old_trap_handler = rthal_set_trap_handler(&xnarch_trap_fault);
+
+#ifdef CONFIG_RTAI_OPT_FUSION
+ err = xnshadow_mount();
+#endif /* CONFIG_RTAI_OPT_FUSION */
+
+ if (err)
+ {
+ rthal_set_trap_handler(xnarch_old_trap_handler);
+ adeos_free_irq(xnarch_escalation_virq);
+ }
+
+ return err;
+}
+
+static inline void xnarch_exit (void)
+
+{
+#ifdef CONFIG_RTAI_OPT_FUSION
+ xnshadow_cleanup();
+#endif /* CONFIG_RTAI_OPT_FUSION */
+ rthal_set_trap_handler(xnarch_old_trap_handler);
+ adeos_free_irq(xnarch_escalation_virq);
+}
+
+#endif /* XENO_MAIN_MODULE */
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Dashboard and graph control. */
+#define XNARCH_DECL_DISPLAY_CONTEXT();
+#define xnarch_init_display_context(obj)
+#define xnarch_create_display(obj,name,tag)
+#define xnarch_delete_display(obj)
+#define xnarch_post_graph(obj,state)
+#define xnarch_post_graph_if(obj,state,cond)
+
+#else /* !__KERNEL__ */
+
+#include <nucleus/system.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* !_RTAI_ASM_PPC64_SYSTEM_H */
diff -Nru fusion-0.7.1/include/nucleus/asm-ppc64/uart.h fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/uart.h
--- fusion-0.7.1/include/nucleus/asm-ppc64/uart.h 1970-01-01 02:00:00.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/include/nucleus/asm-ppc64/uart.h 2005-03-06 21:04:33.000000000 +0200
@@ -0,0 +1,35 @@
+/**
+ * @file
+ * This file is part of the RTAI project.
+ *
+ * @note Copyright (C) 2004 Philippe Gerum <[EMAIL PROTECTED]>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _RTAI_ASM_PPC64_UART_H
+#define _RTAI_ASM_PPC64_UART_H
+
+#include <linux/config.h>
+
+#if defined(CONFIG_SANDPOINT)
+
+#define TTYS0 { 0xfe0003f8, 4 }
+#define TTYS1 { 0xfe0002f8, 3 }
+#else
+#error "UART configuration is undefined for this PowerPC platform"
+#endif
+
+#endif /* !_RTAI_ASM_PPC64_UART_H */
diff -Nru fusion-0.7.1/skins/rtai/lib/queue.c fusion-0.7.1-ppc64-devel/skins/rtai/lib/queue.c
--- fusion-0.7.1/skins/rtai/lib/queue.c 2004-12-24 11:12:13.000000000 +0200
+++ fusion-0.7.1-ppc64-devel/skins/rtai/lib/queue.c 2005-05-23 16:09:34.000000000 +0300
@@ -187,7 +187,7 @@
mode);
}
-int rt_queue_recv (RT_QUEUE *q,
+ssize_t rt_queue_recv (RT_QUEUE *q,
void **bufp,
RTIME timeout)
{
diff -Nru linux-2.6.10/adeos/generic.c linux-2.6.10-adeos-ppc64-devel/adeos/generic.c
--- linux-2.6.10/adeos/generic.c 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/adeos/generic.c 2005-05-12 13:36:51.000000000 +0300
@@ -0,0 +1,639 @@
+/*
+ * linux/adeos/generic.c
+ *
+ * Copyright (C) 2002 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-independent ADEOS services.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+MODULE_DESCRIPTION("Adeos nanokernel");
+MODULE_AUTHOR("Philippe Gerum");
+MODULE_LICENSE("GPL");
+
+/* adeos_register_domain() -- Add a new domain to the system. All
+   client domains must call this routine to register themselves with
+   ADEOS before using its services. */
+
+int adeos_register_domain (adomain_t *adp, adattr_t *attr)
+
+{
+ struct list_head *pos;
+ unsigned long flags;
+ int n;
+
+ if (adp_current != adp_root)
+ {
+ printk(KERN_WARNING "Adeos: Only the root domain may register a new
domain.\n");
+ return -EPERM;
+ }
+
+ flags = adeos_critical_enter(NULL);
+
+ list_for_each(pos,&__adeos_pipeline) {
+ adomain_t *_adp = list_entry(pos,adomain_t,p_link);
+ if (_adp->domid == attr->domid)
+ break;
+ }
+
+ adeos_critical_exit(flags);
+
+ if (pos != &__adeos_pipeline)
+ /* A domain with the given id already exists -- fail. */
+ return -EBUSY;
+
+ for (n = 0; n < ADEOS_NR_CPUS; n++)
+ {
+ /* Each domain starts in sleeping state on every CPU. */
+ adp->cpudata[n].status = (1 << IPIPE_SLEEP_FLAG);
+#ifdef CONFIG_ADEOS_THREADS
+ adp->estackbase[n] = 0;
+#endif /* CONFIG_ADEOS_THREADS */
+ }
+
+ adp->name = attr->name;
+ adp->priority = attr->priority;
+ adp->domid = attr->domid;
+ adp->dswitch = attr->dswitch;
+ adp->flags = 0;
+ adp->ptd_setfun = attr->ptdset;
+ adp->ptd_getfun = attr->ptdget;
+ adp->ptd_keymap = 0;
+ adp->ptd_keycount = 0;
+ adp->ptd_keymax = attr->nptdkeys;
+
+ for (n = 0; n < ADEOS_NR_EVENTS; n++)
+ /* Event handlers must be cleared before the i-pipe stage is
+ inserted since an exception may occur on behalf of the new
+ emerging domain. */
+ adp->events[n].handler = NULL;
+
+ if (attr->entry != NULL)
+ __adeos_init_domain(adp,attr);
+
+ /* Insert the domain in the interrupt pipeline last, so it won't
+ be resumed for processing interrupts until it has a valid stack
+ context. */
+
+ __adeos_init_stage(adp);
+
+ INIT_LIST_HEAD(&adp->p_link);
+
+ flags = adeos_critical_enter(NULL);
+
+ list_for_each(pos,&__adeos_pipeline) {
+ adomain_t *_adp = list_entry(pos,adomain_t,p_link);
+ if (adp->priority > _adp->priority)
+ break;
+ }
+
+ list_add_tail(&adp->p_link,pos);
+
+ adeos_critical_exit(flags);
+
+ printk(KERN_WARNING "Adeos: Domain %s registered.\n",adp->name);
+
+ /* Finally, allow the new domain to perform its initialization
+ chores. */
+
+ if (attr->entry != NULL)
+ {
+ adeos_declare_cpuid;
+
+ adeos_lock_cpu(flags);
+
+#ifdef CONFIG_ADEOS_THREADS
+ __adeos_switch_to(adp_root,adp,cpuid);
+#else /* !CONFIG_ADEOS_THREADS */
+ adp_cpu_current[cpuid] = adp;
+ attr->entry(1);
+ adp_cpu_current[cpuid] = adp_root;
+#endif /* CONFIG_ADEOS_THREADS */
+
+ adeos_load_cpuid(); /* Processor might have changed. */
+
+ if (adp_root->cpudata[cpuid].irq_pending_hi != 0 &&
+ !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status))
+ __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+
+ adeos_unlock_cpu(flags);
+ }
+
+ return 0;
+}
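
For reference, a client domain typically registers itself along these
lines (a sketch only; every name and value is hypothetical, and the
entry prototype is inferred from the attr->entry(1) call above):

    static adomain_t my_domain;

    static void my_entry(int iflag)
    {
        /* Initialization chores for the emerging domain. */
    }

    static int register_my_domain(void)
    {
        adattr_t attr;

        adeos_init_attr(&attr);
        attr.name = "MyDomain";
        attr.domid = 0x4d59444d;              /* arbitrary unique id */
        attr.priority = ADEOS_ROOT_PRI + 100; /* above the root domain */
        attr.entry = my_entry;

        return adeos_register_domain(&my_domain, &attr);
    }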
+
+/* adeos_unregister_domain() -- Remove a domain from the system. All
+ client domains must call this routine to unregister themselves from
+ the ADEOS layer. */
+
+int adeos_unregister_domain (adomain_t *adp)
+
+{
+ unsigned long flags;
+ unsigned event;
+
+ if (adp_current != adp_root)
+ {
+ printk(KERN_WARNING "Adeos: Only the root domain may unregister a
domain.\n");
+ return -EPERM;
+ }
+
+ if (adp == adp_root)
+ {
+ printk(KERN_WARNING "Adeos: Cannot unregister the root domain.\n");
+ return -EPERM;
+ }
+
+ for (event = 0; event < ADEOS_NR_EVENTS; event++)
+ /* Need this to update the monitor count. */
+ adeos_catch_event_from(adp,event,NULL);
+
+#ifdef CONFIG_SMP
+ {
+ int nr_cpus = num_online_cpus(), _cpuid;
+ unsigned irq;
+
+ /* In the SMP case, wait for the logged events to drain on other
+ processors before eventually removing the domain from the
+ pipeline. */
+
+ adeos_unstall_pipeline_from(adp);
+
+ flags = adeos_critical_enter(NULL);
+
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
+ {
+ clear_bit(IPIPE_HANDLE_FLAG,&adp->irqs[irq].control);
+ clear_bit(IPIPE_STICKY_FLAG,&adp->irqs[irq].control);
+ set_bit(IPIPE_PASS_FLAG,&adp->irqs[irq].control);
+ }
+
+ adeos_critical_exit(flags);
+
+ for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
+ {
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
+ while (adp->cpudata[_cpuid].irq_hits[irq] > 0)
+ cpu_relax();
+
+ while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
+ cpu_relax();
+
+ while (!test_bit(IPIPE_SLEEP_FLAG,&adp->cpudata[_cpuid].status))
+ cpu_relax();
+ }
+ }
+#endif /* CONFIG_SMP */
+
+ /* Simply remove the domain from the pipeline and we are almost
+ done. */
+
+ flags = adeos_critical_enter(NULL);
+ list_del_init(&adp->p_link);
+ adeos_critical_exit(flags);
+
+ __adeos_cleanup_domain(adp);
+
+ printk(KERN_WARNING "Adeos: Domain %s unregistered.\n",adp->name);
+
+ return 0;
+}
+
+/* adeos_propagate_irq() -- Force a given IRQ propagation on behalf of
+ a running interrupt handler to the next domain down the pipeline.
+ Returns non-zero if a domain has received the interrupt
+ notification, zero otherwise.
+ This call is useful for handling shared interrupts among domains.
+ e.g. pipeline = [domain-A]---[domain-B]...
+ Both domains share IRQ #X.
+ - domain-A handles IRQ #X but does not pass it down (i.e. Terminate
+ or Dynamic interrupt control mode)
+ - domain-B handles IRQ #X (i.e. Terminate or Accept interrupt
+ control modes).
+ When IRQ #X is raised, domain-A's handler determines whether it
+ should process the interrupt by identifying its source. If not,
+ adeos_propagate_irq() is called so that the next domain down the
+ pipeline which handles IRQ #X is given a chance to process it. This
+ process can be repeated until the end of the pipeline is
+ reached. */
+
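
Concretely, the domain-A side of the pattern described above looks like
this (a sketch; the source check is hypothetical):

    static void domain_a_handler(unsigned irq)
    {
        if (!my_device_raised(irq)) {   /* hypothetical source check */
            adeos_propagate_irq(irq);   /* hand it down the pipeline */
            return;
        }
        /* ... service our own device ... */
    }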
+/* adeos_schedule_irq() -- Almost the same as adeos_propagate_irq(),
+ but attempts to pend the interrupt for the current domain first. */
+
+int fastcall __adeos_schedule_irq (unsigned irq, struct list_head *head)
+
+{
+ struct list_head *ln;
+ unsigned long flags;
+ adeos_declare_cpuid;
+
+ if (irq >= IPIPE_NR_IRQS ||
+	(adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)))
+ return -EINVAL;
+
+ adeos_lock_cpu(flags);
+
+ ln = head;
+
+ while (ln != &__adeos_pipeline)
+ {
+ adomain_t *adp = list_entry(ln,adomain_t,p_link);
+
+ if (test_bit(IPIPE_HANDLE_FLAG,&adp->irqs[irq].control))
+ {
+ adp->cpudata[cpuid].irq_hits[irq]++;
+ __adeos_set_irq_bit(adp,cpuid,irq);
+ adeos_unlock_cpu(flags);
+ return 1;
+ }
+
+ ln = adp->p_link.next;
+ }
+
+ adeos_unlock_cpu(flags);
+
+ return 0;
+}
+
+/* adeos_free_irq() -- Return a previously allocated virtual/soft
+ pipelined interrupt to the pool of allocatable interrupts. */
+
+int adeos_free_irq (unsigned irq)
+
+{
+ if (irq >= IPIPE_NR_IRQS)
+ return -EINVAL;
+
+ clear_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map);
+
+ return 0;
+}
+
+cpumask_t adeos_set_irq_affinity (unsigned irq, cpumask_t cpumask)
+
+{
+#ifdef CONFIG_SMP
+ if (irq >= IPIPE_NR_XIRQS)
+ /* Allow changing affinity of external IRQs only. */
+ return CPU_MASK_NONE;
+
+ if (num_online_cpus() > 1)
+	/* Affinity only matters when more than one CPU is online. */
+ return __adeos_set_irq_affinity(irq,cpumask);
+#endif /* CONFIG_SMP */
+
+ return CPU_MASK_NONE;
+}
+
+/* adeos_catch_event_from() -- Interpose an event handler starting
+ from a given domain. */
+
+int adeos_catch_event_from (adomain_t *adp, unsigned event, void (*handler)(adevinfo_t *))
+
+{
+ if (event >= ADEOS_NR_EVENTS)
+ return -EINVAL;
+
+ if (!xchg(&adp->events[event].handler,handler))
+ {
+ if (handler)
+ __adeos_event_monitors[event]++;
+ }
+ else if (!handler)
+ __adeos_event_monitors[event]--;
+
+ return 0;
+}
+
+void adeos_init_attr (adattr_t *attr)
+
+{
+ attr->name = "Anonymous";
+ attr->domid = 1;
+ attr->entry = NULL;
+ attr->estacksz = 0; /* Let ADEOS choose a reasonable stack size */
+ attr->priority = ADEOS_ROOT_PRI;
+ attr->dswitch = NULL;
+ attr->nptdkeys = 0;
+ attr->ptdset = NULL;
+ attr->ptdget = NULL;
+}
+
+int adeos_alloc_ptdkey (void)
+
+{
+ unsigned long flags;
+ int key = -1;
+
+ adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+ if (adp_current->ptd_keycount < adp_current->ptd_keymax)
+ {
+ key = ffz(adp_current->ptd_keymap);
+ set_bit(key,&adp_current->ptd_keymap);
+ adp_current->ptd_keycount++;
+ }
+
+ adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+ return key;
+}
+
+int adeos_free_ptdkey (int key)
+
+{
+ unsigned long flags;
+
+ if (key < 0 || key >= adp_current->ptd_keymax)
+ return -EINVAL;
+
+ adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+ if (test_and_clear_bit(key,&adp_current->ptd_keymap))
+ adp_current->ptd_keycount--;
+
+ adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+ return 0;
+}
+
+int adeos_set_ptd (int key, void *value)
+
+{
+ if (key < 0 || key >= adp_current->ptd_keymax)
+ return -EINVAL;
+
+ if (!adp_current->ptd_setfun)
+ {
+ printk(KERN_WARNING "Adeos: No ptdset hook for %s\n",adp_current->name);
+ return -EINVAL;
+ }
+
+ adp_current->ptd_setfun(key,value);
+
+ return 0;
+}
+
+void *adeos_get_ptd (int key)
+
+{
+ if (key < 0 || key >= adp_current->ptd_keymax)
+ return NULL;
+
+ if (!adp_current->ptd_getfun)
+ {
+ printk(KERN_WARNING "Adeos: No ptdget hook for %s\n",adp_current->name);
+ return NULL;
+ }
+
+ return adp_current->ptd_getfun(key);
+}
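
Taken together, the per-thread-data calls are used like this (a sketch;
names are hypothetical and error handling is trimmed):

    static void ptd_demo(void *mydata)
    {
        void *back;
        int key = adeos_alloc_ptdkey();

        if (key < 0)
            return;

        adeos_set_ptd(key, mydata);
        back = adeos_get_ptd(key);   /* == mydata */
        (void)back;
        adeos_free_ptdkey(key);
    }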
+
+int adeos_init_mutex (admutex_t *mutex)
+
+{
+ admutex_t initm = ADEOS_MUTEX_UNLOCKED;
+ *mutex = initm;
+ return 0;
+}
+
+#ifdef CONFIG_ADEOS_THREADS
+
+int adeos_destroy_mutex (admutex_t *mutex)
+
+{
+ if (!adeos_spin_trylock(&mutex->lock) &&
+ adp_current != adp_root &&
+ mutex->owner != adp_current)
+ return -EBUSY;
+
+ return 0;
+}
+
+static inline void __adeos_sleepon_mutex (admutex_t *mutex, adomain_t *sleeper, int cpuid)
+
+{
+ adomain_t *owner = mutex->owner;
+
+    /* Make the current domain (== sleeper) wait for the mutex to be
+       released. Adeos' pipelined scheme guarantees that the new
+       sleeper _is_ higher priority than any sleeping domain since we
+       have stalled each sleeper's stage. Must be called with local hw
+       interrupts off. */
+
+ sleeper->m_link = mutex->sleepq;
+ mutex->sleepq = sleeper;
+ __adeos_switch_to(adp_cpu_current[cpuid],owner,cpuid);
+ mutex->owner = sleeper;
+ adeos_spin_unlock(&mutex->lock);
+}
+
+unsigned long fastcall adeos_lock_mutex (admutex_t *mutex)
+
+{
+ unsigned long flags, hwflags;
+ adeos_declare_cpuid;
+ adomain_t *adp;
+
+ if (!adp_pipelined)
+ {
+ adeos_hw_local_irq_save(hwflags);
+ flags = !adeos_hw_test_iflag(hwflags);
+ adeos_spin_lock(&mutex->lock);
+ return flags;
+ }
+
+ adeos_lock_cpu(hwflags);
+
+ adp = adp_cpu_current[cpuid];
+
+ flags = __test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+ /* Two cases to handle here on SMP systems, only one for UP: 1) in
+ case of a conflicting access from a higher priority domain
+ running on the same cpu, make this domain sleep on the mutex,
+ and resume the current owner so it can release the lock asap.
+ 2) in case of a conflicting access from any domain on a
+ different cpu than the current owner's, simply enter a spinning
+ loop. Note that testing mutex->owncpu is safe since it is only
+ changed by the current owner, and set to -1 when the mutex is
+ unlocked. */
+
+#ifdef CONFIG_SMP
+ while (!adeos_spin_trylock(&mutex->lock))
+ {
+ if (mutex->owncpu == cpuid)
+ {
+ __adeos_sleepon_mutex(mutex,adp,cpuid);
+ adeos_load_cpuid();
+ }
+ }
+
+ mutex->owncpu = cpuid;
+#else /* !CONFIG_SMP */
+ while (mutex->owner != NULL && mutex->owner != adp)
+ __adeos_sleepon_mutex(mutex,adp,cpuid);
+#endif /* CONFIG_SMP */
+
+ mutex->owner = adp;
+
+ adeos_unlock_cpu(hwflags);
+
+ return flags;
+}
+
+void fastcall adeos_unlock_mutex (admutex_t *mutex, unsigned long flags)
+
+{
+ unsigned long hwflags;
+ adeos_declare_cpuid;
+ adomain_t *adp;
+
+ if (!adp_pipelined)
+ {
+ adeos_spin_unlock(&mutex->lock);
+
+ if (flags)
+ adeos_hw_cli();
+ else
+ adeos_hw_sti();
+
+ return;
+ }
+
+#ifdef CONFIG_SMP
+ mutex->owncpu = -1;
+#endif /* CONFIG_SMP */
+
+ if (!flags)
+ adeos_hw_sti(); /* Absolutely needed. */
+
+ adeos_lock_cpu(hwflags);
+
+ if (mutex->sleepq != NULL)
+ {
+ adomain_t *sleeper = mutex->sleepq;
+ /* Wake up the highest priority sleeper. */
+ mutex->sleepq = sleeper->m_link;
+ __adeos_switch_to(adp_cpu_current[cpuid],sleeper,cpuid);
+ adeos_load_cpuid();
+ }
+ else
+ {
+ mutex->owner = NULL;
+ adeos_spin_unlock(&mutex->lock);
+ }
+
+ adp = adp_cpu_current[cpuid];
+
+ if (flags)
+ __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+ else
+ {
+ __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+ if (adp->cpudata[cpuid].irq_pending_hi != 0)
+ __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+ }
+
+ adeos_unlock_cpu(hwflags);
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+int adeos_destroy_mutex (admutex_t *mutex)
+
+{
+ if (!adeos_spin_trylock(&mutex->lock) &&
+ adp_current != adp_root)
+ return -EBUSY;
+
+ return 0;
+}
+
+unsigned long fastcall adeos_lock_mutex (admutex_t *mutex)
+
+{
+ unsigned long flags; /* FIXME: won't work on SPARC */
+ adeos_spin_lock_irqsave(&mutex->lock,flags);
+ return flags;
+}
+
+void fastcall adeos_unlock_mutex (admutex_t *mutex, unsigned long flags)
+
+{
+ adeos_spin_unlock_irqrestore(&mutex->lock,flags);
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
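+/* Illustrative sketch, not part of the original code: whichever of
+   the two implementations above is compiled in, callers use the
+   mutex services uniformly:
+
+       admutex_t m;
+       unsigned long flags;
+
+       adeos_init_mutex(&m);
+       flags = adeos_lock_mutex(&m);
+       ... critical section ...
+       adeos_unlock_mutex(&m,flags);
+       adeos_destroy_mutex(&m);
+*/
+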
+void __adeos_takeover (void)
+
+{
+ __adeos_enable_pipeline();
+ printk(KERN_WARNING "Adeos: Pipelining started.\n");
+}
+
+#ifdef MODULE
+
+static int __init adeos_init_module (void)
+
+{
+ __adeos_takeover();
+ return 0;
+}
+
+static void __exit adeos_exit_module (void)
+
+{
+ __adeos_disable_pipeline();
+ printk(KERN_WARNING "Adeos: Pipelining stopped.\n");
+}
+
+module_init(adeos_init_module);
+module_exit(adeos_exit_module);
+
+#endif /* MODULE */
+
+EXPORT_SYMBOL(adeos_register_domain);
+EXPORT_SYMBOL(adeos_unregister_domain);
+EXPORT_SYMBOL(adeos_virtualize_irq_from);
+EXPORT_SYMBOL(adeos_control_irq);
+EXPORT_SYMBOL(adeos_propagate_irq);
+EXPORT_SYMBOL(__adeos_schedule_irq);
+EXPORT_SYMBOL(adeos_free_irq);
+EXPORT_SYMBOL(adeos_send_ipi);
+EXPORT_SYMBOL(adeos_catch_event_from);
+EXPORT_SYMBOL(adeos_init_attr);
+EXPORT_SYMBOL(adeos_get_sysinfo);
+EXPORT_SYMBOL(adeos_tune_timer);
+EXPORT_SYMBOL(adeos_alloc_ptdkey);
+EXPORT_SYMBOL(adeos_free_ptdkey);
+EXPORT_SYMBOL(adeos_set_ptd);
+EXPORT_SYMBOL(adeos_get_ptd);
+EXPORT_SYMBOL(adeos_set_irq_affinity);
+EXPORT_SYMBOL(adeos_init_mutex);
+EXPORT_SYMBOL(adeos_destroy_mutex);
+EXPORT_SYMBOL(adeos_lock_mutex);
+EXPORT_SYMBOL(adeos_unlock_mutex);
diff -Nru linux-2.6.10/adeos/Kconfig
linux-2.6.10-adeos-ppc64-devel/adeos/Kconfig
--- linux-2.6.10/adeos/Kconfig 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/adeos/Kconfig 2005-05-12
13:36:51.000000000 +0300
@@ -0,0 +1,37 @@
+menu "Adeos support"
+
+config ADEOS
+ tristate "Adeos support"
+ default y
+ ---help---
+ Activate this option if you want the Adeos nanokernel to be
+ compiled in.
+
+config ADEOS_CORE
+ def_bool ADEOS
+
+config ADEOS_THREADS
+ bool "Threaded domains"
+ depends on ADEOS
+ default y
+ ---help---
+ This option causes the domains to run as lightweight
+ threads, which is useful for having separate stacks
+ for domains. Enabling this option is the safest setting for
+ now; disabling it causes an experimental mode to be used
+ where interrupts/events are directly processed on behalf of
+ the preempted context. Say Y if unsure.
+
+config ADEOS_NOTHREADS
+ def_bool !ADEOS_THREADS
+
+config ADEOS_PROFILING
+ bool "Pipeline profiling"
+ depends on ADEOS
+ default n
+ ---help---
+ This option activates the profiling code which collects the
+ timestamps needed to measure the propagation time of
+ interrupts through the pipeline. Say N if unsure.
+
+endmenu
diff -Nru linux-2.6.10/adeos/Makefile
linux-2.6.10-adeos-ppc64-devel/adeos/Makefile
--- linux-2.6.10/adeos/Makefile 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/adeos/Makefile 2005-06-01
16:07:52.000000000 +0300
@@ -0,0 +1,15 @@
+#
+# Makefile for the Adeos layer.
+#
+
+obj-$(CONFIG_ADEOS) += adeos.o
+
+adeos-objs := generic.o
+
+adeos-$(CONFIG_X86) += x86.o
+
+adeos-$(CONFIG_IA64) += ia64.o
+
+adeos-$(CONFIG_PPC32) += ppc.o
+
+adeos-$(CONFIG_PPC64) += ppc64.o
diff -Nru linux-2.6.10/adeos/ppc64.c
linux-2.6.10-adeos-ppc64-devel/adeos/ppc64.c
--- linux-2.6.10/adeos/ppc64.c 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/adeos/ppc64.c 2005-06-01
15:46:17.000000000 +0300
@@ -0,0 +1,527 @@
+/*
+ * linux/adeos/ppc64.c
+ *
+ * Adeos 64-bit PowerPC adaptation
+ * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ * based on previous work:
+ *
+ * Copyright (C) 2004 Philippe Gerum.
+ *
+ * Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
+ *
+ * Copyright (C) 2004 Wolfgang Grandegger.
+ *
+ * It follows closely the ARM and x86 ports of ADEOS.
+ *
+ * Copyright (C) 2003 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent ADEOS support for PowerPC.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/time.h>
+#include <asm/cputable.h> /* cur_cpu_spec & CPU_FTR* */
+#include <asm/mmu_context.h> /* get_kernel_vsid */
+
+extern spinlock_t __adeos_pipelock;
+
+extern unsigned long __adeos_virtual_irq_map;
+
+extern struct list_head __adeos_pipeline;
+
+extern irq_desc_t irq_desc[];
+
+static struct hw_interrupt_type __adeos_std_irq_dtype[NR_IRQS];
+
+/*
+ * TODO: should the overrides below check for NULL before calling
+ * __adeos_std_irq_dtype[irq].enable/.disable/.end?
+ */
+
+static void __adeos_override_irq_enable (unsigned irq)
+
+{
+ unsigned long adflags, hwflags;
+ adeos_declare_cpuid;
+
+ adeos_hw_local_irq_save(hwflags);
+ adflags = adeos_test_and_stall_pipeline();
+ preempt_disable();
+ __adeos_unlock_irq(adp_cpu_current[cpuid],irq);
+ __adeos_std_irq_dtype[irq].enable(irq);
+ preempt_enable_no_resched();
+ adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+ adeos_hw_local_irq_restore(hwflags);
+}
+
+static void __adeos_override_irq_disable (unsigned irq)
+
+{
+ unsigned long adflags, hwflags;
+ adeos_declare_cpuid;
+
+ adeos_hw_local_irq_save(hwflags);
+ adflags = adeos_test_and_stall_pipeline();
+ preempt_disable();
+ __adeos_std_irq_dtype[irq].disable(irq);
+ __adeos_lock_irq(adp_cpu_current[cpuid],cpuid,irq);
+ preempt_enable_no_resched();
+ adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+ adeos_hw_local_irq_restore(hwflags);
+}
+
+static void __adeos_override_irq_end (unsigned irq)
+
+{
+ unsigned long adflags, hwflags;
+ adeos_declare_cpuid;
+
+ adeos_hw_local_irq_save(hwflags);
+ adflags = adeos_test_and_stall_pipeline();
+ preempt_disable();
+
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ __adeos_unlock_irq(adp_cpu_current[cpuid],irq);
+
+ __adeos_std_irq_dtype[irq].end(irq);
+
+ preempt_enable_no_resched();
+ adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+ adeos_hw_local_irq_restore(hwflags);
+}
+
+static void __adeos_override_irq_affinity (unsigned irq, cpumask_t mask)
+
+{
+ unsigned long adflags, hwflags;
+ adeos_declare_cpuid;
+
+ adeos_hw_local_irq_save(hwflags);
+ adflags = adeos_test_and_stall_pipeline();
+ preempt_disable();
+ __adeos_std_irq_dtype[irq].set_affinity(irq,mask);
+ preempt_enable_no_resched();
+ adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+ adeos_hw_local_irq_restore(hwflags);
+}
+
+static void __adeos_enable_sync (void)
+
+{
+ __adeos_decr_next[adeos_processor_id()] = __adeos_read_timebase() + get_dec();
+}
+
+/* __adeos_enable_pipeline() -- Take over the interrupt control from
+ the root domain (i.e. Linux). After this routine has returned, all
+ interrupts go through the pipeline. */
+
+void __adeos_enable_pipeline (void)
+
+{
+ unsigned long flags;
+ unsigned irq;
+
+ flags = adeos_critical_enter(&__adeos_enable_sync);
+
+ /* First, virtualize all interrupts from the root domain. */
+
+ for (irq = 0; irq < NR_IRQS; irq++)
+ adeos_virtualize_irq(irq,
+ (void (*)(unsigned))&__adeos_do_IRQ,
+ &__adeos_ack_irq,
+ IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+
+ /* We use a virtual IRQ to handle the timer irq (decrementer trap)
+ which has been allocated early in __adeos_init_platform(). */
+
+ adeos_virtualize_irq(ADEOS_TIMER_VIRQ,
+ (void (*)(unsigned))&__adeos_do_timer,
+ NULL,
+ IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+
+
+ /* Interpose on the IRQ control routines so we can make them
+ atomic using hw masking and prevent the interrupt log from
+ being untimely flushed. */
+
+ for (irq = 0; irq < NR_IRQS; irq++)
+ {
+ if (irq_desc[irq].handler != NULL)
+ __adeos_std_irq_dtype[irq] = *irq_desc[irq].handler;
+ }
+
+ /* The original controller structs are often shared, so we first
+ save them all before changing any of them. Notice that we don't
+ override the ack() handler since we will enforce the necessary
+ setup in __adeos_ack_irq(). */
+
+ for (irq = 0; irq < NR_IRQS; irq++)
+ {
+ struct hw_interrupt_type *handler = irq_desc[irq].handler;
+
+ if (handler == NULL)
+ continue;
+
+ if (handler->enable != NULL)
+ handler->enable = &__adeos_override_irq_enable;
+
+ if (handler->disable != NULL)
+ handler->disable = &__adeos_override_irq_disable;
+
+ if (handler->end != NULL)
+ handler->end = &__adeos_override_irq_end;
+
+ if (handler->set_affinity != NULL)
+ handler->set_affinity = &__adeos_override_irq_affinity;
+ }
+
+ __adeos_decr_next[adeos_processor_id()] = __adeos_read_timebase() + get_dec();
+
+ adp_pipelined = 1;
+
+ adeos_critical_exit(flags);
+}
+
+/* __adeos_disable_pipeline() -- Disengage the pipeline. */
+
+void __adeos_disable_pipeline (void)
+
+{
+ unsigned long flags;
+ unsigned irq;
+
+ flags = adeos_critical_enter(NULL);
+
+ /* Restore interrupt controllers. */
+
+ for (irq = 0; irq < NR_IRQS; irq++)
+ {
+ if (irq_desc[irq].handler != NULL)
+ *irq_desc[irq].handler = __adeos_std_irq_dtype[irq];
+ }
+
+ adp_pipelined = 0;
+
+ adeos_critical_exit(flags);
+}
+
+/* adeos_virtualize_irq_from() -- Attach a handler (and optionally a
+ hw acknowledge routine) to an interrupt for the given domain. */
+
+int adeos_virtualize_irq_from (adomain_t *adp,
+ unsigned irq,
+ void (*handler)(unsigned irq),
+ int (*acknowledge)(unsigned irq),
+ unsigned modemask)
+{
+ unsigned long flags;
+ int err;
+
+ if (irq >= IPIPE_NR_IRQS)
+ return -EINVAL;
+
+ if (adp->irqs[irq].control & IPIPE_SYSTEM_MASK)
+ return -EPERM;
+
+ adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+ if (handler != NULL)
+ {
+ /* A bit of a hack here: if we are re-virtualizing an IRQ just
+ to change the acknowledge routine by passing the special
+ ADEOS_SAME_HANDLER value, then allow recycling the current
+ handler for the IRQ. This allows Linux device drivers
+ managing shared IRQ lines to call adeos_virtualize_irq() in
+ addition to request_irq() just for the purpose of
+ interposing their own shared acknowledge routine. */
+
+ if (handler == ADEOS_SAME_HANDLER)
+ {
+ handler = adp->irqs[irq].handler;
+
+ if (handler == NULL)
+ {
+ err = -EINVAL;
+ goto unlock_and_exit;
+ }
+ }
+ else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 &&
+ adp->irqs[irq].handler != NULL)
+ {
+ err = -EBUSY;
+ goto unlock_and_exit;
+ }
+
+ if ((modemask & (IPIPE_SHARED_MASK|IPIPE_PASS_MASK)) == IPIPE_SHARED_MASK)
+ {
+ err = -EINVAL;
+ goto unlock_and_exit;
+ }
+
+ if ((modemask & IPIPE_STICKY_MASK) != 0)
+ modemask |= IPIPE_HANDLE_MASK;
+ }
+ else
+ modemask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SHARED_MASK);
+
+ if (acknowledge == NULL)
+ {
+ if ((modemask & IPIPE_SHARED_MASK) == 0)
+ /* Acknowledge handler unspecified -- this is ok in
+ non-shared management mode, but we will force the use
+ of the Linux-defined handler instead. */
+ acknowledge = adp_root->irqs[irq].acknowledge;
+ else
+ {
+ /* A valid acknowledge handler to be called in shared mode
+ is required when declaring a shared IRQ. */
+ err = -EINVAL;
+ goto unlock_and_exit;
+ }
+ }
+
+ adp->irqs[irq].handler = handler;
+ adp->irqs[irq].acknowledge = acknowledge;
+ adp->irqs[irq].control = modemask;
+
+ if (irq < NR_IRQS &&
+ handler != NULL &&
+ !adeos_virtual_irq_p(irq) &&
+ (modemask & IPIPE_ENABLE_MASK) != 0)
+ {
+ if (adp != adp_current)
+ {
+ /* IRQ enable/disable state is domain-sensitive, so we may
+ not change it for another domain. What is allowed
+ however is forcing some domain to handle an interrupt
+ source, by passing the proper 'adp' descriptor which
+ thus may be different from adp_current. */
+ err = -EPERM;
+ goto unlock_and_exit;
+ }
+
+ enable_irq(irq);
+ }
+
+ err = 0;
+
+unlock_and_exit:
+
+ adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+ return err;
+}
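+
+/* Illustrative sketch, not part of the original code: a client domain
+   usually virtualizes an IRQ for itself through the current-domain
+   shorthand used elsewhere in this file, e.g.:
+
+       adeos_virtualize_irq(irq,&my_handler,NULL,
+                            IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+
+   where a NULL acknowledge falls back to the Linux-defined handler,
+   as coded above ('my_handler' is hypothetical). */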
+
+/* adeos_control_irq() -- Change an interrupt mode. This affects the
+ way a given interrupt is handled by ADEOS for the current
+ domain. setmask is a bitmask telling whether:
+ - the interrupt should be passed to the domain (IPIPE_HANDLE_MASK),
+ and/or
+ - the interrupt should be passed down to the lower priority domain(s)
+ in the pipeline (IPIPE_PASS_MASK).
+ This leads to four possibilities:
+ - PASS only => Ignore the interrupt
+ - HANDLE only => Terminate the interrupt (process but don't pass down)
+ - PASS + HANDLE => Accept the interrupt (process and pass down)
+ - <none> => Discard the interrupt
+ - DYNAMIC is currently an alias of HANDLE since it marks an interrupt
+ which is processed by the current domain but not implicitly passed
+ down to the pipeline, letting the domain's handler choose on a case-
+ by-case basis whether the interrupt propagation should be forced
+ using adeos_propagate_irq().
+ clrmask clears the corresponding bits from the control field before
+ setmask is applied.
+*/
+
+int adeos_control_irq (unsigned irq,
+ unsigned clrmask,
+ unsigned setmask)
+{
+ irq_desc_t *desc;
+ unsigned long flags;
+
+ if (irq >= IPIPE_NR_IRQS)
+ return -EINVAL;
+
+ if (adp_current->irqs[irq].control & IPIPE_SYSTEM_MASK)
+ return -EPERM;
+
+ if (((setmask|clrmask) & IPIPE_SHARED_MASK) != 0)
+ return -EINVAL;
+
+ desc = irq_desc + irq;
+
+ if (adp_current->irqs[irq].handler == NULL)
+ setmask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
+
+ if ((setmask & IPIPE_STICKY_MASK) != 0)
+ setmask |= IPIPE_HANDLE_MASK;
+
+ if ((clrmask & (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK)) != 0) /* If one goes, both go. */
+ clrmask |= (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
+
+ adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+ adp_current->irqs[irq].control &= ~clrmask;
+ adp_current->irqs[irq].control |= setmask;
+
+ if ((setmask & IPIPE_ENABLE_MASK) != 0)
+ enable_irq(irq);
+ else if ((clrmask & IPIPE_ENABLE_MASK) != 0)
+ disable_irq(irq);
+
+ adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+ return 0;
+}
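+
+/* Illustrative sketch, not part of the original code: with the mode
+   bits documented above, terminating an IRQ in the current domain
+   (process it, don't pass it down) reads:
+
+       adeos_control_irq(irq,IPIPE_PASS_MASK,IPIPE_HANDLE_MASK);
+
+   and restoring the accept mode (process and pass down) reads:
+
+       adeos_control_irq(irq,0,IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+*/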
+
+#ifdef CONFIG_ADEOS_THREADS
+
+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
+
+{
+ int estacksz = attr->estacksz > 0 ? attr->estacksz : 16384, _cpuid;
+ unsigned long flags, *ksp;
+ adeos_declare_cpuid;
+
+ adeos_hw_local_irq_flags(flags);
+
+ for (_cpuid = 0; _cpuid < num_online_cpus(); _cpuid++)
+ {
+ adp->estackbase[_cpuid] = (unsigned long)kmalloc(estacksz,GFP_KERNEL);
+
+ if (adp->estackbase[_cpuid] == 0)
+ panic("Adeos: No memory for domain stack on CPU #%d",_cpuid);
+
+ adp->esp[_cpuid] = adp->estackbase[_cpuid];
+ ksp = (unsigned long *)((adp->esp[_cpuid] + estacksz - 16) & ~0xf);
+ *ksp = 0L; /* first stack frame back-chain */
+ ksp = ksp - STACK_FRAME_OVERHEAD; /* first stack frame (entry uses)
+ * (less would do) */
+ *ksp = (unsigned long)ksp+STACK_FRAME_OVERHEAD; /* second back-chain */
+ ksp = ksp - 224; /* domain context */
+ adp->esp[_cpuid] = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
+ *((unsigned long *)adp->esp[_cpuid]) = (unsigned long)ksp + 224; /* back-chain */
+ /* NOTE: these depend on _adeos_switch_domain ordering */
+ ksp[18] = (unsigned long)get_paca(); /* r13 needs to hold paca */
+ ksp[19] = (_cpuid == cpuid); /* r3 */
+ ksp[20] = ((unsigned long *)attr->entry)[1]; /* r2 = TOC base */
+ ksp[25] = ((unsigned long *)attr->entry)[0]; /* lr = entry addr. */
+ ksp[26] = flags & ~MSR_EE; /* msr */
+ }
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
+
+{}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+void __adeos_cleanup_domain (adomain_t *adp)
+
+{
+ int nr_cpus = num_online_cpus();
+ int _cpuid;
+
+ adeos_unstall_pipeline_from(adp);
+
+ for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
+ {
+#ifdef CONFIG_SMP
+ while (adp->cpudata[_cpuid].irq_pending_hi != 0)
+ cpu_relax();
+
+ while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
+ cpu_relax();
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_ADEOS_THREADS
+ if (adp->estackbase[_cpuid] != 0)
+ kfree((void *)adp->estackbase[_cpuid]);
+#endif /* CONFIG_ADEOS_THREADS */
+ }
+}
+
+int adeos_get_sysinfo (adsysinfo_t *info)
+
+{
+ info->ncpus = num_online_cpus();
+ info->cpufreq = adeos_cpu_freq();
+ info->archdep.tmirq = ADEOS_TIMER_VIRQ;
+ info->archdep.tmfreq = info->cpufreq;
+
+ return 0;
+}
+
+static void __adeos_set_decr (void)
+
+{
+ adeos_declare_cpuid;
+
+ adeos_load_cpuid();
+
+ /*disarm_decr[cpuid] = (__adeos_decr_ticks != tb_ticks_per_jiffy);*/
+ __adeos_decr_next[cpuid] = __adeos_read_timebase() + __adeos_decr_ticks;
+ set_dec(__adeos_decr_ticks);
+}
+
+int adeos_tune_timer (unsigned long ns, int flags)
+
+{
+ unsigned long x, ticks;
+
+ if (flags & ADEOS_RESET_TIMER)
+ ticks = tb_ticks_per_jiffy;
+ else
+ {
+ ticks = ns * tb_ticks_per_jiffy / (1000000000 / HZ);
+
+ if (ticks > tb_ticks_per_jiffy)
+ return -EINVAL;
+ }
+
+ x = adeos_critical_enter(&__adeos_set_decr); /* Sync with all CPUs */
+ __adeos_decr_ticks = ticks;
+ __adeos_set_decr();
+ adeos_critical_exit(x);
+
+ return 0;
+}
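+
+/* Illustrative sketch, not part of the original code: a real-time
+   domain could shrink the decrementer period to 100 us and later
+   hand the standard Linux tick back:
+
+       adeos_tune_timer(100000,0);            /* period in ns */
+       adeos_tune_timer(0,ADEOS_RESET_TIMER); /* back to HZ rate */
+*/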
+
+/* adeos_send_ipi() -- Send a specified service IPI to a set of
+ processors. */
+
+int adeos_send_ipi (unsigned ipi, cpumask_t cpumask)
+
+{
+ printk(KERN_WARNING "Adeos: Call to unimplemented adeos_send_ipi() from
%s\n",adp_current->name);
+ return 0;
+}
diff -Nru linux-2.6.10/arch/ppc64/Kconfig
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/Kconfig
--- linux-2.6.10/arch/ppc64/Kconfig 2004-12-24 23:34:58.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/Kconfig 2005-05-12
13:36:51.000000000 +0300
@@ -370,6 +370,8 @@
depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
default y
+source "adeos/Kconfig"
+
source "arch/ppc64/oprofile/Kconfig"
source "arch/ppc64/Kconfig.debug"
diff -Nru linux-2.6.10/arch/ppc64/kernel/adeos.c
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/adeos.c
--- linux-2.6.10/arch/ppc64/kernel/adeos.c 1970-01-01 02:00:00.000000000
+0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/adeos.c 2005-06-01
15:52:37.000000000 +0300
@@ -0,0 +1,671 @@
+/*
+ * linux/arch/ppc64/kernel/adeos.c
+ *
+ * Adeos 64-bit PowerPC adaptation
+ * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ * based on previous work:
+ *
+ * Copyright (C) 2004 Philippe Gerum.
+ *
+ * Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
+ *
+ * Copyright (C) 2004 Wolfgang Grandegger.
+ *
+ * It follows closely the ARM and x86 ports of ADEOS.
+ *
+ * Copyright (C) 2003 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent ADEOS core support for PowerPC
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/time.h>
+#include <asm/machdep.h> /* ppc_md */
+
+#ifdef CONFIG_SMP
+
+static cpumask_t __adeos_cpu_sync_map;
+
+static cpumask_t __adeos_cpu_lock_map;
+
+static spinlock_t __adeos_cpu_barrier = SPIN_LOCK_UNLOCKED;
+
+static atomic_t __adeos_critical_count = ATOMIC_INIT(0);
+
+static void (*__adeos_cpu_sync)(void);
+
+#endif /* CONFIG_SMP */
+
+void do_IRQ(struct pt_regs *regs);
+
+extern struct list_head __adeos_pipeline;
+
+struct pt_regs __adeos_irq_regs;
+
+/* Current reload value for the decrementer. */
+unsigned long __adeos_decr_ticks;
+
+/* Next tick date (timebase value). */
+unsigned long __adeos_decr_next[ADEOS_NR_CPUS];
+
+static inline unsigned long ffnz (unsigned long ul) {
+
+ __asm__ __volatile__ ("cntlzd %0, %1" : "=r" (ul) : "r" (ul & (-ul)));
+ return 63 - ul;
+}
+
+#ifdef CONFIG_SMP
+
+/* Always called with hw interrupts off. */
+
+static void __adeos_do_critical_sync (unsigned irq)
+
+{
+ adeos_declare_cpuid;
+
+ adeos_load_cpuid();
+
+ cpu_set(cpuid,__adeos_cpu_sync_map);
+
+ /* Now we are in sync with the lock requestor running on another
+ CPU. Enter a spinning wait until he releases the global
+ lock. */
+ adeos_spin_lock(&__adeos_cpu_barrier);
+
+ /* Got it. Now get out. */
+
+ if (__adeos_cpu_sync)
+ /* Call the sync routine if any. */
+ __adeos_cpu_sync();
+
+ adeos_spin_unlock(&__adeos_cpu_barrier);
+
+ cpu_clear(cpuid,__adeos_cpu_sync_map);
+}
+
+#endif /* CONFIG_SMP */
+
+/* adeos_critical_enter() -- Grab the superlock for entering a global
+ critical section. On UP this is equivalent to a hw cli(); on SMP
+ the other online CPUs are also synchronized via a critical IPI. */
+
+unsigned long adeos_critical_enter (void (*syncfn)(void))
+
+{
+ unsigned long flags;
+
+ adeos_hw_local_irq_save(flags);
+
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) /* We might be running a SMP-kernel on a UP box... */
+ {
+ adeos_declare_cpuid;
+ cpumask_t lock_map;
+
+ adeos_load_cpuid();
+
+ if (!cpu_test_and_set(cpuid,__adeos_cpu_lock_map))
+ {
+ while (cpu_test_and_set(BITS_PER_LONG - 1,__adeos_cpu_lock_map))
+ {
+ /* Refer to the explanations found in
+ linux/arch/asm-i386/irq.c about
+ SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND for more about
+ this strange loop. */
+ int n = 0;
+ do { cpu_relax(); } while (++n < cpuid);
+ }
+
+ adeos_spin_lock(&__adeos_cpu_barrier);
+
+ __adeos_cpu_sync = syncfn;
+
+ /* Send the sync IPI to all processors but the current one. */
+ __adeos_send_IPI_allbutself(ADEOS_CRITICAL_VECTOR);
+
+ cpus_andnot(lock_map,cpu_online_map,__adeos_cpu_lock_map);
+
+ while (!cpus_equal(__adeos_cpu_sync_map,lock_map))
+ cpu_relax();
+ }
+
+ atomic_inc(&__adeos_critical_count);
+ }
+#endif /* CONFIG_SMP */
+
+ return flags;
+}
+
+/* adeos_critical_exit() -- Release the superlock. */
+
+void adeos_critical_exit (unsigned long flags)
+
+{
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) /* We might be running a SMP-kernel on a UP box... */
+ {
+ adeos_declare_cpuid;
+
+ adeos_load_cpuid();
+
+ if (atomic_dec_and_test(&__adeos_critical_count))
+ {
+ adeos_spin_unlock(&__adeos_cpu_barrier);
+
+ while (!cpus_empty(__adeos_cpu_sync_map))
+ cpu_relax();
+
+ cpu_clear(cpuid,__adeos_cpu_lock_map);
+ cpu_clear(BITS_PER_LONG - 1,__adeos_cpu_lock_map);
+ }
+ }
+#endif /* CONFIG_SMP */
+
+ adeos_hw_local_irq_restore(flags);
+}
+
+void __adeos_init_platform (void)
+
+{
+ unsigned timer_virq;
+
+ /* Allocate a virtual IRQ for the decrementer trap early to get it
+ mapped to IPIPE_VIRQ_BASE */
+
+ timer_virq = adeos_alloc_irq();
+
+ if (timer_virq != ADEOS_TIMER_VIRQ)
+ panic("Adeos: cannot reserve timer virq #%d (got #%d)",
+ ADEOS_TIMER_VIRQ,
+ timer_virq);
+
+ __adeos_decr_ticks = tb_ticks_per_jiffy;
+}
+
+void __adeos_init_stage (adomain_t *adp)
+
+{
+ int cpuid, n;
+
+ for (cpuid = 0; cpuid < ADEOS_NR_CPUS; cpuid++)
+ {
+ adp->cpudata[cpuid].irq_pending_hi = 0;
+
+ for (n = 0; n < IPIPE_IRQ_IWORDS; n++)
+ adp->cpudata[cpuid].irq_pending_lo[n] = 0;
+
+ for (n = 0; n < IPIPE_NR_IRQS; n++)
+ adp->cpudata[cpuid].irq_hits[n] = 0;
+ }
+
+ for (n = 0; n < IPIPE_NR_IRQS; n++)
+ {
+ adp->irqs[n].acknowledge = NULL;
+ adp->irqs[n].handler = NULL;
+ adp->irqs[n].control = IPIPE_PASS_MASK; /* Pass but don't handle */
+ }
+
+#ifdef CONFIG_SMP
+ adp->irqs[ADEOS_CRITICAL_IPI].acknowledge = &__adeos_ack_irq;
+ adp->irqs[ADEOS_CRITICAL_IPI].handler = &__adeos_do_critical_sync;
+ /* Immediately handle in the current domain but *never* pass */
+ adp->irqs[ADEOS_CRITICAL_IPI].control = IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SYSTEM_MASK;
+#endif /* CONFIG_SMP */
+}
+
+/* __adeos_sync_stage() -- Flush the pending IRQs for the current
+ domain (and processor). This routine flushes the interrupt log
+ (see "Optimistic interrupt protection" from D. Stodolsky et al. for
+ more on the deferred interrupt scheme). Every interrupt that
+ occurred while the pipeline was stalled gets played. WARNING:
+ callers on SMP boxen should always check for CPU migration on
+ return from this routine. One can control the kind of interrupts
+ which are going to be sync'ed using the syncmask
+ parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
+ plays virtual interrupts only. This routine must be called with hw
+ interrupts off. */
+
+void __adeos_sync_stage (unsigned long syncmask)
+
+{
+ unsigned long mask, submask;
+ struct adcpudata *cpudata;
+ int level, rank, sync;
+ adeos_declare_cpuid;
+ adomain_t *adp;
+ unsigned irq;
+
+ adeos_load_cpuid();
+ adp = adp_cpu_current[cpuid];
+ cpudata = &adp->cpudata[cpuid];
+
+ sync = __test_and_set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
+
+ /* The policy here is to keep the dispatching code interrupt-free
+ by stalling the current stage. If the upper domain handler
+ (which we call) wants to re-enable interrupts while in a safe
+ portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
+ sigaction()), it will have to unstall (then stall again before
+ returning to us!) the stage when it sees fit. */
+
+ while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0)
+ {
+ /* Give a slight priority advantage to high-numbered IRQs
+ like the virtual ones. */
+ level = ffnz(mask);
+ __clear_bit(level,&cpudata->irq_pending_hi);
+
+ while ((submask = cpudata->irq_pending_lo[level]) != 0)
+ {
+ rank = ffnz(submask);
+ irq = (level << IPIPE_IRQ_ISHIFT) + rank;
+
+ if (test_bit(IPIPE_LOCK_FLAG,&adp->irqs[irq].control))
+ {
+ __clear_bit(rank,&cpudata->irq_pending_lo[level]);
+ continue;
+ }
+
+ if (--cpudata->irq_hits[irq] == 0)
+ __clear_bit(rank,&cpudata->irq_pending_lo[level]);
+
+ __set_bit(IPIPE_STALL_FLAG,&cpudata->status);
+
+#ifdef CONFIG_ADEOS_PROFILING
+ __adeos_profile_data[cpuid].irqs[irq].n_synced++;
+ adeos_hw_tsc(__adeos_profile_data[cpuid].irqs[irq].t_synced);
+#endif /* CONFIG_ADEOS_PROFILING */
+
+ if (adp == adp_root)
+ adeos_hw_sti();
+
+ ((void (*)(unsigned, struct pt_regs *))adp->irqs[irq].handler)(irq,&__adeos_irq_regs);
+
+ adeos_hw_cli();
+
+#ifdef CONFIG_SMP
+ if (adeos_processor_id() != cpuid) /* Handle CPU migration. */
+ /* We expect any domain to clear the SYNC bit each
+ time it switches in a new task, so that preemptions
+ and/or CPU migrations (in the SMP case) over the
+ ISR do not lock out the log syncer for some
+ indefinite amount of time. In the Linux case,
+ schedule() handles this (see kernel/sched.c). For
+ this reason, we don't bother clearing it here for
+ the source CPU in the migration handling case,
+ since it must have scheduled another task in by
+ now. */
+ return;
+#endif /* CONFIG_SMP */
+
+ __clear_bit(IPIPE_STALL_FLAG,&cpudata->status);
+ }
+ }
+
+ if (!sync)
+ __clear_bit(IPIPE_SYNC_FLAG,&cpudata->status);
+}
+
+int __adeos_ack_irq (unsigned irq)
+
+{
+ irq_desc_t *desc = get_irq_desc(irq);
+
+ if (desc->handler->ack != NULL)
+ {
+ unsigned long adflags;
+ adeos_declare_cpuid;
+
+ /* No need to mask IRQs at hw level: we are always called from
+ __adeos_handle_irq(), so interrupts are already off. We
+ stall the pipeline so that spin_lock_irq*() ops won't
+ unintentionally flush it, since this could cause infinite
+ recursion. */
+
+ adeos_load_cpuid();
+ adflags = adeos_test_and_stall_pipeline();
+#ifdef CONFIG_PREEMPT
+ preempt_disable();
+#endif /* CONFIG_PREEMPT */
+ spin_lock(&desc->lock);
+ desc->handler->ack(irq);
+ spin_unlock(&desc->lock);
+#ifdef CONFIG_PREEMPT
+ preempt_enable_no_resched();
+#endif /* CONFIG_PREEMPT */
+ adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+ }
+
+ return 1;
+}
+
+static inline void __adeos_walk_pipeline (struct list_head *pos, int cpuid)
+
+{
+ adomain_t *this_domain = adp_cpu_current[cpuid];
+
+ while (pos != &__adeos_pipeline)
+ {
+ adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
+
+ if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
+ break; /* Stalled stage -- do not go further. */
+
+ if (next_domain->cpudata[cpuid].irq_pending_hi != 0)
+ {
+ /* Since the critical IPI might be dispatched by the
+ following actions, the current domain might not be
+ linked to the pipeline anymore after its handler
+ returns on SMP boxes, even if the domain remains valid
+ (see adeos_unregister_domain()), so don't make any
+ dangerous assumptions here. */
+
+ if (next_domain == this_domain)
+ __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+ else
+ {
+ __adeos_switch_to(this_domain,next_domain,cpuid);
+
+ adeos_load_cpuid(); /* Processor might have changed. */
+
+ if (this_domain->cpudata[cpuid].irq_pending_hi != 0 &&
+ !test_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status) &&
+ !test_bit(IPIPE_SYNC_FLAG,&this_domain->cpudata[cpuid].status))
+ __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+ }
+
+ break;
+ }
+ else if (next_domain == this_domain)
+ break;
+
+ pos = next_domain->p_link.next;
+ }
+}
+
+/* __adeos_handle_irq() -- ADEOS's generic IRQ handler. An optimistic
+ interrupt protection log is maintained here for each
+ domain. Interrupts are off on entry. */
+
+void __adeos_handle_irq (int irq, struct pt_regs *regs)
+
+{
+ struct list_head *head, *pos;
+ adeos_declare_cpuid;
+ int m_ack, s_ack;
+
+ m_ack = irq & ADEOS_IRQ_ACKED;
+ irq &= ADEOS_IRQ_ACKED_MASK;
+
+ if (irq >= IPIPE_NR_IRQS)
+ {
+ printk(KERN_ERR "Adeos: spurious interrupt %d\n",irq);
+ return;
+ }
+
+ adeos_load_cpuid();
+
+#ifdef CONFIG_ADEOS_PROFILING
+ __adeos_profile_data[cpuid].irqs[irq].n_handled++;
+ adeos_hw_tsc(__adeos_profile_data[cpuid].irqs[irq].t_handled);
+#endif /* CONFIG_ADEOS_PROFILING */
+
+ s_ack = m_ack;
+
+ if (test_bit(IPIPE_STICKY_FLAG,&adp_cpu_current[cpuid]->irqs[irq].control))
+ head = &adp_cpu_current[cpuid]->p_link;
+ else
+ head = __adeos_pipeline.next;
+
+ /* Ack the interrupt. */
+
+ pos = head;
+
+ while (pos != &__adeos_pipeline)
+ {
+ adomain_t *_adp = list_entry(pos,adomain_t,p_link);
+
+ /* For each domain handling the incoming IRQ, mark it as
+ pending in its log. */
+
+ if (test_bit(IPIPE_HANDLE_FLAG,&_adp->irqs[irq].control))
+ {
+ /* Domains that handle this IRQ are polled for
+ acknowledging it by decreasing priority order. The
+ interrupt must be made pending _first_ in the domain's
+ status flags before the PIC is unlocked. */
+
+ _adp->cpudata[cpuid].irq_hits[irq]++;
+ __adeos_set_irq_bit(_adp,cpuid,irq);
+
+ /* Always get the first master acknowledge available. Once
+ we've got it, allow slave acknowledge handlers to run
+ (until one of them stops us). */
+
+ if (_adp->irqs[irq].acknowledge != NULL)
+ {
+ if (!m_ack)
+ m_ack = _adp->irqs[irq].acknowledge(irq);
+ else if (test_bit(IPIPE_SHARED_FLAG,&_adp->irqs[irq].control) && !s_ack)
+ s_ack = _adp->irqs[irq].acknowledge(irq);
+ }
+ }
+
+ /* If the domain does not want the IRQ to be passed down the
+ interrupt pipe, exit the loop now. */
+
+ if (!test_bit(IPIPE_PASS_FLAG,&_adp->irqs[irq].control))
+ break;
+
+ pos = _adp->p_link.next;
+ }
+
+ /* Now walk the pipeline, yielding control to the highest priority
+ domain that has pending interrupt(s) or immediately to the
+ current domain if the interrupt has been marked as
+ 'sticky'. This search does not go beyond the current domain in
+ the pipeline. To understand this code properly, one must keep
+ in mind that domains having a higher priority than the current
+ one are sleeping on the adeos_suspend_domain() service. In
+ addition, domains having a lower priority have been preempted
+ by an interrupt dispatched to a higher priority domain. Once
+ the first and highest priority stage has been selected here,
+ the subsequent stages will be activated in turn when each
+ visited domain calls adeos_suspend_domain() to wake up its
+ neighbour down the pipeline. */
+
+ __adeos_walk_pipeline(head,cpuid);
+}
+
+/* ADEOS's version of the interrupt trap handler. */
+
+int __adeos_grab_irq (struct pt_regs *regs)
+
+{
+ extern int ppc_spurious_interrupts;
+ adeos_declare_cpuid;
+ int irq;
+
+ if (!adp_pipelined)
+ {
+ do_IRQ(regs);
+ return 1;
+ }
+
+ irq = ppc_md.get_irq(regs);
+ if (irq >= 0)
+ {
+ __adeos_handle_irq(irq,regs);
+ }
+ else
+ ppc_spurious_interrupts++;
+
+ adeos_load_cpuid();
+
+ return (adp_cpu_current[cpuid] == adp_root &&
+ !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
+}
+
+/* ADEOS's version of irq.c:do_IRQ(). */
+
+void __adeos_do_IRQ (int irq, struct pt_regs *regs) {
+ irq_enter();
+ ppc_irq_dispatch_handler(regs, irq);
+ irq_exit();
+}
+
+/* ADEOS's version of the decrementer trap handler. */
+
+int __adeos_grab_timer (struct pt_regs *regs)
+
+{
+ adeos_declare_cpuid;
+
+ if (!adp_pipelined)
+ {
+ timer_interrupt(regs);
+ return 1;
+ }
+
+ /* On 970 CPUs the DEC cannot be disabled, so without re-arming
+ * it here a DEC interrupt would trigger as soon as interrupts
+ * are re-enabled in __adeos_sync_stage().
+ */
+ set_dec(0x7fffffff);
+
+ __adeos_irq_regs.msr = regs->msr; /* for do_timer() */
+
+ __adeos_handle_irq(ADEOS_TIMER_VIRQ,regs);
+
+ adeos_load_cpuid();
+
+ if (__adeos_decr_ticks != tb_ticks_per_jiffy)
+ {
+ unsigned long next_date, now;
+
+ next_date = __adeos_decr_next[cpuid];
+
+ while ((now = __adeos_read_timebase()) >= next_date)
+ next_date += __adeos_decr_ticks;
+
+ set_dec(next_date - now);
+
+ __adeos_decr_next[cpuid] = next_date;
+ }
+
+ return (adp_cpu_current[cpuid] == adp_root &&
+ !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
+}
+
+void __adeos_do_timer (int irq, struct pt_regs *regs)
+
+{
+ timer_interrupt(regs);
+}
+
+asmlinkage int __adeos_check_root (struct pt_regs *regs)
+
+{
+ adeos_declare_cpuid;
+ /* This routine is called with hw interrupts off, so no migration
+ can occur while checking the identity of the current domain. */
+ adeos_load_cpuid();
+ return (adp_cpu_current[cpuid] == adp_root &&
+ !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
+}
+
+/* adeos_trigger_irq() -- Push the interrupt to the pipeline entry
+ just as if it had actually been received from a hw source. This
+ works for both real and virtual interrupts. This also means that
+ the current domain might be immediately preempted by a higher
+ priority domain that happens to handle this interrupt. */
+
+int adeos_trigger_irq (unsigned irq)
+
+{
+ struct pt_regs regs;
+ unsigned long flags;
+
+ if (irq >= IPIPE_NR_IRQS ||
+ (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)))
+ return -EINVAL;
+
+ adeos_hw_local_irq_save(flags);
+
+ regs.msr = flags;
+
+ __adeos_handle_irq(irq | ADEOS_IRQ_ACKED, &regs);
+
+ adeos_hw_local_irq_restore(flags);
+
+ return 1;
+}
+
+int __adeos_enter_syscall (struct pt_regs *regs)
+
+{
+ /* By convention, this routine should either return:
+ 0 -- if the syscall is to be passed to Linux;
+ 1 -- if the syscall should not be passed to Linux, and no
+ tail work should be performed;
+ -1 -- if the syscall should not be passed to Linux but the
+ tail work has to be performed. */
+
+ if (__adeos_event_monitors[ADEOS_SYSCALL_PROLOGUE] > 0 &&
+ __adeos_handle_event(ADEOS_SYSCALL_PROLOGUE,regs) > 0)
+ return adp_current == adp_root && !in_atomic() ? -1 : 1;
+
+ return 0;
+}
+
+int __adeos_exit_syscall (void)
+
+{
+ if (__adeos_event_monitors[ADEOS_SYSCALL_EPILOGUE] > 0)
+ return __adeos_handle_event(ADEOS_SYSCALL_EPILOGUE,NULL);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(__adeos_init_stage);
+EXPORT_SYMBOL(__adeos_sync_stage);
+EXPORT_SYMBOL(__adeos_irq_regs);
+#ifdef CONFIG_ADEOS_THREADS
+EXPORT_SYMBOL(__adeos_switch_domain);
+#endif /* CONFIG_ADEOS_THREADS */
+EXPORT_SYMBOL(__adeos_do_IRQ);
+EXPORT_SYMBOL(__adeos_do_timer);
+EXPORT_SYMBOL(__adeos_decr_ticks);
+EXPORT_SYMBOL(__adeos_decr_next);
+EXPORT_SYMBOL(__adeos_current_threadinfo);
+EXPORT_SYMBOL(adeos_critical_enter);
+EXPORT_SYMBOL(adeos_critical_exit);
+EXPORT_SYMBOL(adeos_trigger_irq);
diff -Nru linux-2.6.10/arch/ppc64/kernel/entry.S
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/entry.S
--- linux-2.6.10/arch/ppc64/kernel/entry.S 2004-12-24 23:33:49.000000000
+0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/entry.S 2005-06-01
15:51:02.000000000 +0300
@@ -108,6 +108,23 @@
ori r11,r11,MSR_EE
mtmsrd r11,1
+#ifdef CONFIG_ADEOS_CORE
+ addi r3,r1,GPR0
+ bl .__adeos_enter_syscall
+ cmpdi r3,0
+ ld r0,GPR0(r1)
+ ld r3,GPR3(r1)
+ ld r4,GPR4(r1)
+ ld r5,GPR5(r1)
+ ld r6,GPR6(r1)
+ ld r7,GPR7(r1)
+ ld r8,GPR8(r1)
+ ld r9,GPR9(r1)
+ bgt adeos_end_syscall
+ blt syscall_exit_adeos
+ addi r9,r1,STACK_FRAME_OVERHEAD
+#endif /* CONFIG_ADEOS_CORE */
+
#ifdef SHOW_SYSCALLS
bl .do_show_syscall
REST_GPR(0,r1)
@@ -145,7 +162,13 @@
ldx r10,r11,r0 /* Fetch system call handler [ptr] */
mtctr r10
bctrl /* Call handler */
-
+#ifdef CONFIG_ADEOS_CORE
+ std r3,RESULT(r1)
+ bl .__adeos_exit_syscall
+ cmpdi r3,0
+ ld r3,RESULT(r1)
+ bne- syscall_exit_adeos
+#endif /* CONFIG_ADEOS_CORE */
syscall_exit:
#ifdef SHOW_SYSCALLS
std r3,GPR3(r1)
@@ -195,6 +218,39 @@
mtspr SRR1,r8
rfid
b . /* prevent speculative execution */
+#ifdef CONFIG_ADEOS_CORE
+syscall_exit_adeos:
+ ld r5,_CCR(r1)
+ ld r8,_MSR(r1)
+ ld r7,_NIP(r1)
+ stdcx. r0,0,r1 /* ?!? to clear the reservation */
+ andi. r6,r8,MSR_PR
+ ld r4,_LINK(r1)
+ beq- 1f /* only restore r13 if */
+ ld r13,GPR13(r1) /* returning to usermode */
+1: ld r2,GPR2(r1)
+ ld r1,GPR1(r1)
+ li r12,MSR_RI
+ mfmsr r10 /* should this be done here? */
+ andc r10,r10,r12
+ mtmsrd r10,1 /* clear MSR.RI */
+ mtlr r4
+ mtcr r5
+ mtspr SRR0,r7
+ mtspr SRR1,r8
+ rfid
+ b . /* prevent speculative execution */
+#endif /* CONFIG_ADEOS_CORE */
+
+#ifdef CONFIG_ADEOS_CORE
+ .globl adeos_end_syscall
+adeos_end_syscall:
+ mfmsr r10
+ rldicl r10,r10,48,1
+ rotldi r10,r10,16
+ mtmsrd r10,1
+ b syscall_exit_adeos
+#endif /* CONFIG_ADEOS_CORE */
syscall_enosys:
li r3,-ENOSYS
@@ -468,6 +524,13 @@
rotldi r9,r9,16
mtmsrd r9,1 /* Update machine state */
+#ifdef CONFIG_ADEOS_CORE
+ bl .__adeos_check_root
+ cmpdi r3,0
+ mfmsr r10 /* this is used later, might be clobbered */
+ beq- restore
+#endif /* CONFIG_ADEOS_CORE */
+
#ifdef CONFIG_PREEMPT
clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
li r0,_TIF_NEED_RESCHED /* bits to check */
@@ -844,3 +907,126 @@
blr
#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+#ifdef CONFIG_ADEOS_CORE
+
+_GLOBAL(__adeos_ret_from_except_lite)
+ cmpdi r3,0
+ bne+ .ret_from_except_lite
+ b restore
+
+#ifdef CONFIG_ADEOS_THREADS
+
+/*
+ * r3 = adp_next, r4 = adp_cpu_current[adeos_processor_id()].
+ * NOTE: This code is _not_ SMP-compliant. Always called with hw
+ * interrupts off.
+ * TODO: implement (configure time) support for different ABIs?
+ */
+_GLOBAL(__adeos_switch_domain)
+
+ /* 27*8 = 216 for registers
+ * +8 padding for quad-word alignment as required by spec
+ * = 224 */
+ /* alloc stack frame (store and update r1) */
+ stdu r1,-224-STACK_FRAME_OVERHEAD(r1)
+
+ /* Save general purpose registers. (22) */
+ std r31,STACK_FRAME_OVERHEAD+0*8(r1)
+ std r30,STACK_FRAME_OVERHEAD+1*8(r1)
+ std r29,STACK_FRAME_OVERHEAD+2*8(r1)
+ std r28,STACK_FRAME_OVERHEAD+3*8(r1)
+ std r27,STACK_FRAME_OVERHEAD+4*8(r1)
+ std r26,STACK_FRAME_OVERHEAD+5*8(r1)
+ std r25,STACK_FRAME_OVERHEAD+6*8(r1)
+ std r24,STACK_FRAME_OVERHEAD+7*8(r1)
+ std r23,STACK_FRAME_OVERHEAD+8*8(r1)
+ std r22,STACK_FRAME_OVERHEAD+9*8(r1)
+ std r21,STACK_FRAME_OVERHEAD+10*8(r1)
+ std r20,STACK_FRAME_OVERHEAD+11*8(r1)
+ std r19,STACK_FRAME_OVERHEAD+12*8(r1)
+ std r18,STACK_FRAME_OVERHEAD+13*8(r1)
+ std r17,STACK_FRAME_OVERHEAD+14*8(r1)
+ std r16,STACK_FRAME_OVERHEAD+15*8(r1)
+ std r15,STACK_FRAME_OVERHEAD+16*8(r1)
+ std r14,STACK_FRAME_OVERHEAD+17*8(r1)
+ std r13,STACK_FRAME_OVERHEAD+18*8(r1)
+ std r3,STACK_FRAME_OVERHEAD+19*8(r1)
+ std r2,STACK_FRAME_OVERHEAD+20*8(r1)
+ std r0,STACK_FRAME_OVERHEAD+21*8(r1)
+
+ /* Save special registers. (5) */
+ mfctr r2
+ std r2,STACK_FRAME_OVERHEAD+22*8(r1)
+ mfcr r2
+ std r2,STACK_FRAME_OVERHEAD+23*8(r1)
+ mfxer r2
+ std r2,STACK_FRAME_OVERHEAD+24*8(r1)
+ mflr r2
+ std r2,STACK_FRAME_OVERHEAD+25*8(r1)
+ mfmsr r2
+ std r2,STACK_FRAME_OVERHEAD+26*8(r1)
+
+ /* Actual switch block. */
+ ld r2,0(r4) /* r2 = old_adp = adp_cpu_current[cpuid] */
+ std r1,0(r2) /* old_adp->esp[0] = sp */
+ std r3,0(r4) /* adp_cpu_current[cpuid] = new_adp */
+ /* CONFIG_SMP should sync here; but first, accesses to esp[]
+ would require cpuid-indexing. */
+ ld r1,0(r3) /* sp = new_adp->esp[0] */
+
+ /* Restore special registers. */
+ ld r2,STACK_FRAME_OVERHEAD+26*8(r1)
+ mtmsrd r2
+ ld r2,STACK_FRAME_OVERHEAD+25*8(r1)
+ mtlr r2
+ ld r2,STACK_FRAME_OVERHEAD+24*8(r1)
+ mtxer r2
+ ld r2,STACK_FRAME_OVERHEAD+23*8(r1)
+ mtcr r2
+ ld r2,STACK_FRAME_OVERHEAD+22*8(r1)
+ mtctr r2
+
+ /* Restore general purpose registers. */
+ ld r0,STACK_FRAME_OVERHEAD+21*8(r1)
+ ld r2,STACK_FRAME_OVERHEAD+20*8(r1)
+ ld r3,STACK_FRAME_OVERHEAD+19*8(r1)
+ ld r13,STACK_FRAME_OVERHEAD+18*8(r1)
+ ld r14,STACK_FRAME_OVERHEAD+17*8(r1)
+ ld r15,STACK_FRAME_OVERHEAD+16*8(r1)
+ ld r16,STACK_FRAME_OVERHEAD+15*8(r1)
+ ld r17,STACK_FRAME_OVERHEAD+14*8(r1)
+ ld r18,STACK_FRAME_OVERHEAD+13*8(r1)
+ ld r19,STACK_FRAME_OVERHEAD+12*8(r1)
+ ld r20,STACK_FRAME_OVERHEAD+11*8(r1)
+ ld r21,STACK_FRAME_OVERHEAD+10*8(r1)
+ ld r22,STACK_FRAME_OVERHEAD+9*8(r1)
+ ld r23,STACK_FRAME_OVERHEAD+8*8(r1)
+ ld r24,STACK_FRAME_OVERHEAD+7*8(r1)
+ ld r25,STACK_FRAME_OVERHEAD+6*8(r1)
+ ld r26,STACK_FRAME_OVERHEAD+5*8(r1)
+ ld r27,STACK_FRAME_OVERHEAD+4*8(r1)
+ ld r28,STACK_FRAME_OVERHEAD+3*8(r1)
+ ld r29,STACK_FRAME_OVERHEAD+2*8(r1)
+ ld r30,STACK_FRAME_OVERHEAD+1*8(r1)
+ ld r31,STACK_FRAME_OVERHEAD+0*8(r1)
+
+ addi r1,r1,224+STACK_FRAME_OVERHEAD
+
+ blr
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+/* TODO: not touched or checked in any way */
+/* Returns the current threadinfo pointer in a way which is
+ insensitive to the underlying stack, by directly reading the
+ special purpose register #3. */
+
+_GLOBAL(__adeos_current_threadinfo)
+ mfspr r3,SPRG3
+ addi r3,r3,-THREAD
+ tovirt(r3,r3)
+ blr
+
+#endif /* CONFIG_ADEOS_CORE */
+
diff -Nru linux-2.6.10/arch/ppc64/kernel/head.S
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/head.S
--- linux-2.6.10/arch/ppc64/kernel/head.S 2004-12-24 23:34:48.000000000
+0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/head.S 2005-06-01
15:51:17.000000000 +0300
@@ -381,6 +381,18 @@
bl hdlr; \
b .ret_from_except_lite
+#ifdef CONFIG_ADEOS_CORE
+#define ADEOS_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
+ .align 7; \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ DISABLE_INTS; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b .__adeos_ret_from_except_lite
+#endif /* CONFIG_ADEOS_CORE */
+
/*
* Start of pSeries system interrupt routines
*/
@@ -761,7 +773,12 @@
bl .MachineCheckException
b .ret_from_except
+
+#ifdef CONFIG_ADEOS_CORE
+ ADEOS_EXCEPTION_COMMON_LITE(0x900, Decrementer, .__adeos_grab_timer)
+#else /* !CONFIG_ADEOS_CORE */
STD_EXCEPTION_COMMON_LITE(0x900, Decrementer, .timer_interrupt)
+#endif /* CONFIG_ADEOS_CORE */
STD_EXCEPTION_COMMON(0xa00, Trap_0a, .UnknownException)
STD_EXCEPTION_COMMON(0xb00, Trap_0b, .UnknownException)
STD_EXCEPTION_COMMON(0xd00, SingleStep, .SingleStepException)
@@ -890,8 +907,13 @@
HardwareInterrupt_entry:
DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_ADEOS_CORE
+ bl .__adeos_grab_irq
+ b .__adeos_ret_from_except_lite
+#else /* !CONFIG_ADEOS_CORE */
bl .do_IRQ
b .ret_from_except_lite
+#endif /* CONFIG_ADEOS_CORE */
.align 7
.globl Alignment_common
diff -Nru linux-2.6.10/arch/ppc64/kernel/idle.c
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/idle.c
--- linux-2.6.10/arch/ppc64/kernel/idle.c 2004-12-24 23:35:24.000000000
+0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/idle.c 2005-05-12
13:36:51.000000000 +0300
@@ -131,6 +131,9 @@
while (!need_resched() && !cpu_is_offline(cpu)) {
barrier();
+#ifdef CONFIG_ADEOS_CORE
+ adeos_suspend_domain();
+#endif /* CONFIG_ADEOS_CORE */
/*
* Go into low thread priority and possibly
* low power mode.
@@ -288,8 +291,15 @@
{
while(1) {
/* check CPU type here */
- if (!need_resched())
+ if (!need_resched())
+#ifdef CONFIG_ADEOS_CORE
+ {
+ adeos_suspend_domain();
power4_idle();
+ }
+#else /* !CONFIG_ADEOS_CORE */
+ power4_idle();
+#endif /* CONFIG_ADEOS_CORE */
if (need_resched())
schedule();
}
diff -Nru linux-2.6.10/arch/ppc64/kernel/irq.c
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/irq.c
--- linux-2.6.10/arch/ppc64/kernel/irq.c 2004-12-24 23:34:32.000000000
+0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/irq.c 2005-05-12
13:36:51.000000000 +0300
@@ -134,14 +134,25 @@
if (desc->status & IRQ_PER_CPU) {
/* no locking required for CPU-local interrupts: */
+#ifdef CONFIG_ADEOS_CORE
+ if (!adp_pipelined)
+ ack_irq(irq);
+#else
ack_irq(irq);
+#endif /* CONFIG_ADEOS_CORE */
action_ret = handle_IRQ_event(irq, regs, desc->action);
desc->handler->end(irq);
return;
}
spin_lock(&desc->lock);
+#ifdef CONFIG_ADEOS_CORE
+ if (!adp_pipelined)
+ ack_irq(irq);
+#else
ack_irq(irq);
+#endif /* CONFIG_ADEOS_CORE */
+
/*
REPLAY is when Linux resends an IRQ that was dropped earlier
WAITING is used by probe to mark irqs that are being tested
diff -Nru linux-2.6.10/arch/ppc64/kernel/Makefile
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/Makefile
--- linux-2.6.10/arch/ppc64/kernel/Makefile 2004-12-24 23:35:39.000000000
+0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/Makefile 2005-05-12
13:36:51.000000000 +0300
@@ -62,4 +62,6 @@
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
+obj-$(CONFIG_ADEOS_CORE) += adeos.o
+
CFLAGS_ioctl32.o += -Ifs/
diff -Nru linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/ppc_ksyms.c
--- linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c 2004-12-24 23:34:26.000000000
+0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/ppc_ksyms.c
2005-06-01 15:17:21.000000000 +0300
@@ -163,3 +163,32 @@
EXPORT_SYMBOL(paca);
EXPORT_SYMBOL(cur_cpu_spec);
EXPORT_SYMBOL(systemcfg);
+
+#ifdef CONFIG_ADEOS_CORE
+/* The following are per-platform convenience exports which are needed
+ by some Adeos domains loaded as kernel modules. */
+EXPORT_SYMBOL(tb_ticks_per_jiffy);
+EXPORT_SYMBOL(__switch_to);
+void show_stack(struct task_struct *task,
+ unsigned long *esp);
+EXPORT_SYMBOL(show_stack);
+
+/* these two are needed by the task switching code in fusion */
+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
+extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+EXPORT_SYMBOL(switch_stab);
+EXPORT_SYMBOL(switch_slb);
+
+/* flush_tlb_pending() */
+EXPORT_PER_CPU_SYMBOL(ppc64_tlb_batch);
+EXPORT_SYMBOL(__flush_tlb_pending);
+
+EXPORT_SYMBOL(_switch);
+#ifdef FEW_CONTEXTS
+EXPORT_SYMBOL(nr_free_contexts);
+EXPORT_SYMBOL(context_mm);
+EXPORT_SYMBOL(steal_context);
+#endif
+extern struct task_struct *last_task_used_math;
+EXPORT_SYMBOL(last_task_used_math);
+#endif /* CONFIG_ADEOS_CORE */
diff -Nru linux-2.6.10/arch/ppc64/kernel/traps.c
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/traps.c
--- linux-2.6.10/arch/ppc64/kernel/traps.c 2004-12-24 23:34:47.000000000
+0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/kernel/traps.c 2005-06-01
16:58:59.000000000 +0300
@@ -75,6 +75,11 @@
if (debugger(regs))
return 1;
+#ifdef CONFIG_ADEOS_CORE
+ /* lets us see Oopses from other domains, too */
+ if (adp_current != adp_root)
+ adeos_set_printk_sync(adp_current);
+#endif /* CONFIG_ADEOS_CORE */
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
@@ -185,9 +190,20 @@
}
#endif
+#ifdef CONFIG_ADEOS_CORE
+static inline int __adeos_pipeline_trap(int trap, struct pt_regs *regs)
+{
+ return __adeos_event_monitors[trap] > 0 ? __adeos_handle_event(trap,regs)
: 0;
+}
+#endif /* CONFIG_ADEOS_CORE */
+
void
SystemResetException(struct pt_regs *regs)
{
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_SYSRESET_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
#ifdef CONFIG_PPC_PSERIES
if (fwnmi_active) {
struct rtas_error_log *errhdr = FWNMI_get_errinfo(regs);
@@ -265,7 +281,11 @@
return;
}
#endif
-
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_MCE_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
+
if (debugger_fault_handler(regs))
return;
die("Machine check", regs, 0);
@@ -278,6 +298,11 @@
void
UnknownException(struct pt_regs *regs)
{
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_UNKNOWN_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
+
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
@@ -289,12 +314,20 @@
{
if (debugger_iabr_match(regs))
return;
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_IABR_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}
void
SingleStepException(struct pt_regs *regs)
{
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_SSTEP_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
if (debugger_sstep(regs))
@@ -459,6 +492,11 @@
void
ProgramCheckException(struct pt_regs *regs)
{
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_PCE_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
+
if (regs->msr & 0x100000) {
/* IEEE FP exception */
parse_fpe(regs);
@@ -500,6 +538,10 @@
void KernelFPUnavailableException(struct pt_regs *regs)
{
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_KFPUNAVAIL_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
@@ -507,6 +549,11 @@
void AltivecUnavailableException(struct pt_regs *regs)
{
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_ALTUNAVAIL_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
+
#ifndef CONFIG_ALTIVEC
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
@@ -539,6 +586,10 @@
void
PerformanceMonitorException(struct pt_regs *regs)
{
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_PERFMON_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
perf_irq(regs);
}
@@ -554,7 +605,12 @@
emulate_single_step(regs);
return;
}
-
+#ifdef CONFIG_ADEOS_CORE
+ /* Assume that fixing alignment can always be done regardless
+ of the current domain. */
+ if (__adeos_pipeline_trap(ADEOS_ALIGNMENT_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
/* Operand address was bad */
if (fixed == -EFAULT) {
if (user_mode(regs)) {
@@ -577,6 +633,11 @@
int err;
siginfo_t info;
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_ALTASSIST_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
+
if (!user_mode(regs)) {
printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
" at %lx\n", regs->nip);
@@ -618,6 +679,10 @@
*/
void unrecoverable_exception(struct pt_regs *regs)
{
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_pipeline_trap(ADEOS_NREC_TRAP,regs))
+ return;
+#endif /* CONFIG_ADEOS_CORE */
printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
regs->trap, regs->nip);
die("Unrecoverable exception", regs, SIGABRT);
diff -Nru linux-2.6.10/arch/ppc64/mm/fault.c
linux-2.6.10-adeos-ppc64-devel/arch/ppc64/mm/fault.c
--- linux-2.6.10/arch/ppc64/mm/fault.c 2004-12-24 23:35:23.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/arch/ppc64/mm/fault.c 2005-05-12
13:36:51.000000000 +0300
@@ -95,6 +95,12 @@
BUG_ON((trap == 0x380) || (trap == 0x480));
+#ifdef CONFIG_ADEOS_CORE
+ if (__adeos_event_monitors[ADEOS_ACCESS_TRAP] > 0 &&
+ __adeos_handle_event(ADEOS_ACCESS_TRAP,regs) != 0)
+ return 0;
+#endif /* CONFIG_ADEOS_CORE */
+
if (trap == 0x300) {
if (debugger_fault_handler(regs))
return 0;
diff -Nru linux-2.6.10/Documentation/adeos.txt
linux-2.6.10-adeos-ppc64-devel/Documentation/adeos.txt
--- linux-2.6.10/Documentation/adeos.txt 1970-01-01 02:00:00.000000000
+0200
+++ linux-2.6.10-adeos-ppc64-devel/Documentation/adeos.txt 2005-05-12
13:36:51.000000000 +0300
@@ -0,0 +1,176 @@
+
+The Adeos nanokernel is based on research and publications made in the
+early '90s on the subject of nanokernels. Our basic method was to
+reverse the approach described in most of the papers on the subject.
+Instead of first building the nanokernel and then building the client
+OSes, we started from a live and known-to-be-functional OS, Linux, and
+inserted a nanokernel beneath it. Starting from Adeos, other client
+OSes can now be put side-by-side with the Linux kernel.
+
+To this end, Adeos enables multiple domains to exist simultaneously on
+the same hardware. None of these domains see each other, but all of
+them see Adeos. A domain is most probably a complete OS, but there is
+no assumption being made regarding the sophistication of what's in
+a domain.
+
+To share the hardware among the different OSes, Adeos implements an
+interrupt pipeline (ipipe). Every OS domain has an entry in the ipipe.
+Each interrupt that comes in the ipipe is passed on to every domain
+in the ipipe. Instead of disabling/enabling interrupts, each domain
+in the pipeline only needs to stall/unstall its pipeline stage. If
+an ipipe stage is stalled, then the interrupts do not progress in the
+ipipe until that stage has been unstalled. Each stage of the ipipe
+can, of course, decide to do a number of things with an interrupt.
+Among other things, it can decide that it's the last recipient of the
+interrupt. In that case, the ipipe does not propagate the interrupt
+to the rest of the domains in the ipipe.
+
+Regardless of the operations being done in the ipipe, the Adeos code
+does __not__ play with the interrupt masks. The only case where the
+hardware masks are altered is during the addition/removal of a domain
+from the ipipe. This also means that no OS is allowed to use the real
+hardware cli/sti. But this is OK, since the stall/unstall calls
+achieve the same functionality.
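+
+As an illustrative sketch (adeos_stall_pipeline_from() is assumed here
+from the public Adeos API; only its unstall counterpart appears in this
+patch), a domain holding descriptor 'adp' would bracket a critical
+section with:
+
+    adeos_stall_pipeline_from(adp);   /* like cli, for this stage only */
+    ... critical section ...
+    adeos_unstall_pipeline_from(adp); /* like sti; pending IRQs replay */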
+
+Our approach is based on the following papers (links to these
+papers are provided at the bottom of this message):
+[1] D. Probert, J. Bruno, and M. Karaorman. "Space: a new approach to
+operating system abstraction." In: International Workshop on Object
+Orientation in Operating Systems, pages 133-137, October 1991.
+[2] D. Probert, J. Bruno. "Building fundamentally extensible application-
+specific operating systems in Space", March 1995.
+[3] D. Cheriton, K. Duda. "A caching model of operating system kernel
+functionality". In: Proc. Symp. on Operating Systems Design and
+Implementation, pages 179-194, Monterey CA (USA), 1994.
+[4] D. Engler, M. Kaashoek, and J. O'Toole Jr. "Exokernel: an operating
+system architecture for application-specific resource management",
+December 1995.
+
+If you don't want to go fetch the complete papers, here's a summary.
+The first two discuss the Space nanokernel, the third discusses the
+cache kernel, and the last discusses the exokernel.
+
+The complete Adeos approach has been thoroughly documented in a whitepaper
+published more than a year ago entitled "Adaptive Domain Environment
+for Operating Systems" and available here: http://www.opersys.com/adeos
+The current implementation is slightly different. Mainly, we do not
+implement the functionality to move Linux out of ring 0. Although of
+interest, this approach is not very portable.
+
+Instead, our patch taps right into Linux's main source of control
+over the hardware, the interrupt dispatching code, and inserts an
+interrupt pipeline which can then serve all the nanokernel's clients,
+including Linux.
+
+This is not a novelty in itself. Other OSes have been modified in such
+a way for a wide range of purposes. One of the most interesting
+examples is described by Stodolsky, Chen, and Bershad in a paper
+entitled "Fast Interrupt Priority Management in Operating System
+Kernels" published in 1993 as part of the Usenix Microkernels and
+Other Kernel Architectures Symposium. In that case, cli/sti were
+replaced by virtual cli/sti which did not modify the real interrupt
+mask in any way. Instead, interrupts were deferred and delivered to
+the OS upon a call to the virtualized sti.
+
+Mainly, this resulted in increased performance for the OS. Although
+we haven't done any measurements on Linux's interrupt handling
+performance with Adeos, our nanokernel includes by definition the
+code implementing the technique described in the above-mentioned
+Stodolsky paper, which we use to redirect the hardware interrupt flow
+to the pipeline.
+
+i386 and armnommu are currently supported. Most of the
+architecture-dependent code is easily portable to other architectures.
+
+Aside from adding the Adeos module (driver/adeos), we also modified some
+files to tap into Linux interrupt and system event dispatching (all
+the modifications are encapsulated in #ifdef CONFIG_ADEOS_*/#endif).
+
+We modified the idle task so it gives control back to Adeos in order for
+the ipipe to continue propagation.
+
+We modified init/main.c to initialize Adeos very early in the startup.
+
+Of course, we also added the appropriate makefile modifications and
+config options so that you can choose to enable/disable Adeos as
+part of the kernel build configuration.
+
+Adeos' public API is fully documented here:
+http://www.freesoftware.fsf.org/adeos/doc/api/index.html.
+
+In Linux's case, adeos_register_domain() is called very early during
+system startup.
+
+To add your domain to the ipipe, you need to:
+1) Register your domain with Adeos using adeos_register_domain()
+2) Call adeos_virtualize_irq() for all the IRQs you wish to be
+notified about in the ipipe.
+
+That's it. Provided you gave Adeos appropriate handlers in step
+#2, your interrupts will be delivered via the ipipe.
+
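+As a minimal sketch (hypothetical names throughout; EXAMPLE_IRQ stands
+for whatever interrupt you care about, and the prototypes are those
+from include/linux/adeos.h):
+
+    static adomain_t my_domain;
+
+    static void my_irq_handler (unsigned irq)
+    {
+        /* domain-specific processing */
+        adeos_propagate_irq(irq); /* optionally pass it down the ipipe */
+    }
+
+    static void my_domain_entry (int iflag)
+    {
+        adeos_virtualize_irq(EXAMPLE_IRQ,
+                             &my_irq_handler,
+                             NULL, /* no private acknowledge routine */
+                             IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+        for (;;)
+            adeos_suspend_domain(); /* idle until work is pending */
+    }
+
+    static int __init example_init (void)
+    {
+        adattr_t attr;
+
+        adeos_init_attr(&attr);
+        attr.name = "Example";
+        attr.domid = 0x12345678;
+        attr.priority = ADEOS_ROOT_PRI + 1; /* ahead of Linux in the ipipe */
+        attr.entry = &my_domain_entry;
+        return adeos_register_domain(&my_domain,&attr);
+    }
+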
+During runtime, you may change your position in the ipipe using
+adeos_renice_domain(). You may also stall/unstall the pipeline
+and change the ipipe's handling of the interrupts according to your
+needs.
+
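+For instance, a domain can bracket a section of its own code against
+interrupt log syncing with (sketch):
+
+    unsigned long flags = adeos_test_and_stall_pipeline();
+    /* ... critical section for this stage ... */
+    adeos_restore_pipeline(flags);
+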
+Adeos supports SMP; APIC support on UP is also available.
+
+Here are some of the possible uses for Adeos (this list is far
+from complete):
+1) Much like User-Mode Linux, it should now be possible to have 2
+Linux kernels living side-by-side on the same hardware. In contrast
+to UML, this would not be 2 kernels one on top of the other, but
+really side-by-side. Since Linux can be told at boot time to use
+only one portion of the available RAM, on a 128MB machine this
+would mean that the first could be made to use the 0-64MB space and
+the second would use the 64-128MB space. We realize that many
+modifications are required. Among other things, one of the 2 kernels
+will not need to conduct hardware initialization. Nevertheless, this
+possibility should be studied closer.
+
+2) It follows from #1 that adding other kernels beside Linux should
+be feasible. BSD is a prime candidate, but it would also be nice to
+see what virtualizers such as VMWare and Plex86 could do with Adeos.
+Proprietary operating systems could potentially also be accommodated.
+
+3) All the previous work that has been done on nanokernels should now
+be easily ported to Linux. We would be very interested to hear
+about extensions to Adeos; for instance, we currently have no
+mechanisms enabling multiple domains to share information. The papers
+mentioned earlier provide such mechanisms, but we'd like to see
+actual practical examples.
+
+4) Kernel debuggers' main problem (tapping into the kernel's
+interrupts) is solved, so it should be possible to provide patchless
+kernel debuggers shipped as ordinary loadable kernel modules.
+
+5) Drivers that require absolute priority and dislike other kernel
+portions that use cli/sti can now create a domain of their own
+and place themselves before Linux in the ipipe. This provides a
+mechanism for implementing systems with guaranteed realtime response.
+
+Philippe Gerum <[EMAIL PROTECTED]>
+Karim Yaghmour <[EMAIL PROTECTED]>
+
+----------------------------------------------------------------------
+Links to papers:
+1-
+http://citeseer.nj.nec.com/probert91space.html
+ftp://ftp.cs.ucsb.edu/pub/papers/space/iwooos91.ps.gz (not working)
+http://www4.informatik.uni-erlangen.de/~tsthiel/Papers/Space-iwooos91.ps.gz
+
+2-
+http://www.cs.ucsb.edu/research/trcs/abstracts/1995-06.shtml
+http://www4.informatik.uni-erlangen.de/~tsthiel/Papers/Space-trcs95-06.ps.gz
+
+3-
+http://citeseer.nj.nec.com/kenneth94caching.html
+http://guir.cs.berkeley.edu/projects/osprelims/papers/cachmodel-OSkernel.ps.gz
+
+4-
+http://citeseer.nj.nec.com/engler95exokernel.html
+ftp://ftp.cag.lcs.mit.edu/multiscale/exokernel.ps.Z
+----------------------------------------------------------------------
diff -Nru linux-2.6.10/include/asm-ppc64/adeos.h linux-2.6.10-adeos-ppc64-devel/include/asm-ppc64/adeos.h
--- linux-2.6.10/include/asm-ppc64/adeos.h 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/include/asm-ppc64/adeos.h 2005-06-01 17:05:31.000000000 +0300
@@ -0,0 +1,437 @@
+/*
+ * include/asm-ppc64/adeos.h
+ *
+ * Adeos 64-bit PowerPC adoption
+ * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ * based on previous work:
+ *
+ * Copyright (C) 2004 Philippe Gerum.
+ *
+ * Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
+ *
+ * Copyright (C) 2004 Wolfgang Grandegger.
+ *
+ * It follows closely the ARM and x86 ports of ADEOS.
+ *
+ * Copyright (C) 2002 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __PPC64_ADEOS_H
+#define __PPC64_ADEOS_H
+
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
+#include <linux/threads.h>
+
+#define ADEOS_ARCH_STRING "r6/ppc64"
+#define ADEOS_MAJOR_NUMBER 6
+#define ADEOS_MINOR_NUMBER 255
+
+#define ADEOS_IRQ_ACKED 0x1000
+#define ADEOS_IRQ_ACKED_MASK (ADEOS_IRQ_ACKED - 1)
+
+#ifdef CONFIG_SMP
+
+#error "Adeos/ppc64: SMP not yet implemented"
+
+#define ADEOS_NR_CPUS NR_CPUS
+#define ADEOS_CRITICAL_IPI 0
+
+#define adeos_processor_id() (__adeos_current_threadinfo()->cpu)
+
+#define adeos_declare_cpuid int cpuid
+#define adeos_load_cpuid() do { \
+ (cpuid) = adeos_processor_id(); \
+ } while(0)
+#define adeos_lock_cpu(flags) do { \
+ adeos_hw_local_irq_save(flags); \
+ (cpuid) = adeos_processor_id(); \
+ } while(0)
+#define adeos_unlock_cpu(flags) adeos_hw_local_irq_restore(flags)
+#define adeos_get_cpu(flags) adeos_lock_cpu(flags)
+#define adeos_put_cpu(flags) adeos_unlock_cpu(flags)
+#define adp_current (adp_cpu_current[adeos_processor_id()])
+
+#else /* !CONFIG_SMP */
+
+#define ADEOS_NR_CPUS 1
+#define adeos_processor_id() 0
+/* Array references using this index should be optimized out. */
+#define adeos_declare_cpuid const int cpuid = 0
+#define adeos_load_cpuid() /* nop */
+#define adeos_lock_cpu(flags) adeos_hw_local_irq_save(flags)
+#define adeos_unlock_cpu(flags) adeos_hw_local_irq_restore(flags)
+#define adeos_get_cpu(flags) do { flags = flags; } while(0)
+#define adeos_put_cpu(flags) /* nop */
+#define adp_current (adp_cpu_current[0])
+
+#endif /* CONFIG_SMP */
+
+ /* PPC traps */
+#define ADEOS_ACCESS_TRAP 0 /* Data or instruction access exception */
+#define ADEOS_ALIGNMENT_TRAP 1 /* Alignment exception */
+#define ADEOS_ALTUNAVAIL_TRAP 2 /* Altivec unavailable */
+#define ADEOS_PCE_TRAP 3 /* Program check exception */
+#define ADEOS_MCE_TRAP 4 /* Machine check exception */
+#define ADEOS_UNKNOWN_TRAP 5 /* Unknown exception */
+#define ADEOS_IABR_TRAP 6 /* Instruction breakpoint */
+#define ADEOS_SSTEP_TRAP 7 /* Single-step exception */
+#define ADEOS_NREC_TRAP 8 /* Non-recoverable exception */
+#define ADEOS_ALTASSIST_TRAP 9 /* Altivec assist exception */
+#define ADEOS_SYSRESET_TRAP 10 /* System reset exception */
+#define ADEOS_KFPUNAVAIL_TRAP 11 /* Kernel FP Unavailable exception */
+#define ADEOS_PERFMON_TRAP 12 /* Performance Monitor exception */
+#define ADEOS_NR_FAULTS 13
+/* Pseudo-vectors used for kernel events */
+#define ADEOS_FIRST_KEVENT ADEOS_NR_FAULTS
+#define ADEOS_SYSCALL_PROLOGUE (ADEOS_FIRST_KEVENT)
+#define ADEOS_SYSCALL_EPILOGUE (ADEOS_FIRST_KEVENT + 1)
+#define ADEOS_SCHEDULE_HEAD (ADEOS_FIRST_KEVENT + 2)
+#define ADEOS_SCHEDULE_TAIL (ADEOS_FIRST_KEVENT + 3)
+#define ADEOS_ENTER_PROCESS (ADEOS_FIRST_KEVENT + 4)
+#define ADEOS_EXIT_PROCESS (ADEOS_FIRST_KEVENT + 5)
+#define ADEOS_SIGNAL_PROCESS (ADEOS_FIRST_KEVENT + 6)
+#define ADEOS_KICK_PROCESS (ADEOS_FIRST_KEVENT + 7)
+#define ADEOS_RENICE_PROCESS (ADEOS_FIRST_KEVENT + 8)
+#define ADEOS_USER_EVENT (ADEOS_FIRST_KEVENT + 9)
+#define ADEOS_LAST_KEVENT (ADEOS_USER_EVENT)
+
+#define ADEOS_NR_EVENTS (ADEOS_LAST_KEVENT + 1)
+
+typedef struct adevinfo {
+
+ unsigned domid;
+ unsigned event;
+ void *evdata;
+
+ volatile int propagate; /* Private */
+
+} adevinfo_t;
+
+typedef struct adsysinfo {
+
+ int ncpus; /* Number of CPUs on board */
+
+ u64 cpufreq; /* CPU frequency (in Hz) */
+
+ /* Arch-dependent block */
+
+ struct {
+ unsigned tmirq; /* Decrementer virtual IRQ */
+ u64 tmfreq; /* Timebase frequency */
+ } archdep;
+
+} adsysinfo_t;
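+
+/* Usage sketch: a client domain typically queries this block through
+   adeos_get_sysinfo() (declared in linux/adeos.h), e.g.
+
+       adsysinfo_t si;
+       adeos_get_sysinfo(&si);
+
+   and then uses si.archdep.tmfreq (timebase frequency) and
+   si.archdep.tmirq (decrementer VIRQ) to set up its timing. */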
+
+#define IPIPE_NR_XIRQS NR_IRQS
+/* Number of virtual IRQs */
+#define IPIPE_NR_VIRQS BITS_PER_LONG
+/* First virtual IRQ # */
+#define IPIPE_VIRQ_BASE (((IPIPE_NR_XIRQS + BITS_PER_LONG - 1) / BITS_PER_LONG) * BITS_PER_LONG)
+/* Total number of IRQ slots */
+#define IPIPE_NR_IRQS (IPIPE_VIRQ_BASE + IPIPE_NR_VIRQS)
+/* Number of indirect words needed to map the whole IRQ space. */
+#define IPIPE_IRQ_IWORDS ((IPIPE_NR_IRQS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define IPIPE_IRQ_IMASK (BITS_PER_LONG - 1)
+#define IPIPE_IRQ_ISHIFT 6 /* 2^6 for 64-bit arch. */
+
+#define IPIPE_IRQMASK_ANY (~0L)
+#define IPIPE_IRQMASK_VIRT (IPIPE_IRQMASK_ANY << (IPIPE_VIRQ_BASE / BITS_PER_LONG))
+
+/* The first virtual interrupt is reserved for the timer (see
+ __adeos_init_platform). */
+#define ADEOS_TIMER_VIRQ IPIPE_VIRQ_BASE
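+
+/* Worked example (illustrative only -- the real NR_IRQS value comes
+   from asm/irq.h): with NR_IRQS = 512 and BITS_PER_LONG = 64,
+   IPIPE_VIRQ_BASE = ((512 + 63) / 64) * 64 = 512, so virtual IRQs
+   occupy slots 512..575, IPIPE_NR_IRQS = 576 and IPIPE_IRQ_IWORDS =
+   (576 + 63) / 64 = 9 indirection words. */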
+
+typedef struct adomain {
+
+ /* -- Section: offset-based references are made on these fields
+ from inline assembly code. Please don't move or reorder. */
+#ifdef CONFIG_ADEOS_THREADS
+ unsigned long esp[ADEOS_NR_CPUS]; /* Domain stack pointers */
+#endif /* CONFIG_ADEOS_THREADS */
+ void (*dswitch)(void); /* Domain switch hook */
+ /* -- End of section. */
+
+ struct list_head p_link; /* Link in pipeline */
+
+ struct adcpudata {
+ unsigned long status;
+ unsigned long irq_pending_hi;
+ unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
+ unsigned irq_hits[IPIPE_NR_IRQS];
+#ifdef CONFIG_ADEOS_THREADS
+ adevinfo_t event_info;
+#endif /* CONFIG_ADEOS_THREADS */
+ } cpudata[ADEOS_NR_CPUS];
+
+ struct {
+ int (*acknowledge)(unsigned irq);
+ void (*handler)(unsigned irq);
+ unsigned long control;
+ } irqs[IPIPE_NR_IRQS];
+
+ struct {
+ void (*handler)(adevinfo_t *evinfo);
+ } events[ADEOS_NR_EVENTS];
+
+ struct adomain *m_link; /* Link in mutex sleep queue */
+
+ unsigned long flags;
+
+ unsigned domid;
+
+ const char *name;
+
+ int priority;
+
+ int ptd_keymax;
+ int ptd_keycount;
+ unsigned long ptd_keymap;
+ void (*ptd_setfun)(int, void *);
+ void *(*ptd_getfun)(int);
+
+#ifdef CONFIG_ADEOS_THREADS
+ unsigned long estackbase[ADEOS_NR_CPUS];
+#endif /* CONFIG_ADEOS_THREADS */
+
+} adomain_t;
+
+/* The following macros must be used with hw interrupts off. */
+
+#define __adeos_set_irq_bit(adp,cpuid,irq) \
+do { \
+    if (!test_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) { \
+        __set_bit(irq & IPIPE_IRQ_IMASK,&(adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+        __set_bit(irq >> IPIPE_IRQ_ISHIFT,&(adp)->cpudata[cpuid].irq_pending_hi); \
+    } \
+} while(0)
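+
+/* E.g. (sketch): for irq 70, the pending word is 70 >> IPIPE_IRQ_ISHIFT
+   = 1 and the bit within that word is 70 & IPIPE_IRQ_IMASK = 6; bit 1
+   of irq_pending_hi then marks word 1 as non-empty, letting the syncer
+   skip empty words quickly. */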
+
+#define __adeos_clear_pend(adp,cpuid,irq) \
+do { \
+    __clear_bit(irq & IPIPE_IRQ_IMASK,&(adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+    if ((adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT] == 0) \
+        __clear_bit(irq >> IPIPE_IRQ_ISHIFT,&(adp)->cpudata[cpuid].irq_pending_hi); \
+} while(0)
+
+#define __adeos_lock_irq(adp,cpuid,irq) \
+do { \
+ if (!test_and_set_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) \
+ __adeos_clear_pend(adp,cpuid,irq); \
+} while(0)
+
+#define __adeos_unlock_irq(adp,irq) \
+do { \
+    if (test_and_clear_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) { \
+        int __cpuid, __nr_cpus = num_online_cpus(); \
+        for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) \
+            if ((adp)->cpudata[__cpuid].irq_hits[irq] > 0) { /* We need atomic ops next. */ \
+                set_bit(irq & IPIPE_IRQ_IMASK,&(adp)->cpudata[__cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+                set_bit(irq >> IPIPE_IRQ_ISHIFT,&(adp)->cpudata[__cpuid].irq_pending_hi); \
+            } \
+    } \
+} while(0)
+
+#define __adeos_clear_irq(adp,irq) \
+do { \
+ int __cpuid, __nr_cpus = num_online_cpus(); \
+ clear_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control); \
+ for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) { \
+ (adp)->cpudata[__cpuid].irq_hits[irq] = 0; \
+ __adeos_clear_pend(adp,__cpuid,irq); \
+ } \
+} while(0)
+
+#define adeos_virtual_irq_p(irq) ((irq) >= IPIPE_VIRQ_BASE && \
+ (irq) < IPIPE_NR_IRQS)
+
+static inline void adeos_hw_local_irq_save_ptr(unsigned long *flags)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ *flags = msr;
+ __mtmsrd(msr & ~MSR_EE, 1);
+ __asm__ __volatile__("": : :"memory");
+}
+
+#define adeos_hw_local_irq_save_flags(flags) adeos_hw_local_irq_save_ptr(&(flags))
+#define adeos_hw_local_irq_restore(flags) do { \
+ __asm__ __volatile__("": : :"memory"); \
+ __mtmsrd((flags), 1); \
+} while(0)
+
+static inline void adeos_hw_local_irq_disable(void)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ __mtmsrd(msr & ~MSR_EE, 1);
+ __asm__ __volatile__("": : :"memory");
+}
+
+static inline void adeos_hw_local_irq_enable(void)
+{
+ unsigned long msr;
+ __asm__ __volatile__("": : :"memory");
+ msr = mfmsr();
+ __mtmsrd(msr | MSR_EE, 1);
+}
+
+#define adeos_hw_local_irq_save(flags) ({adeos_hw_local_irq_save_flags(flags);adeos_hw_local_irq_disable();})
+#define adeos_hw_save_flags_and_sti(flags) ({adeos_hw_local_irq_save_flags(flags);adeos_hw_local_irq_enable();})
+
+#define adeos_hw_cli() adeos_hw_local_irq_disable()
+#define adeos_hw_sti() adeos_hw_local_irq_enable()
+
+#define adeos_hw_local_irq_flags(flags) ((flags) = mfmsr())
+#define adeos_hw_test_iflag(x) ((x) & MSR_EE)
+#define adeos_hw_irqs_disabled() \
+({ \
+ unsigned long flags; \
+ adeos_hw_local_irq_flags(flags);\
+ !adeos_hw_test_iflag(flags); \
+})
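+
+/* Typical usage (sketch):
+
+       unsigned long flags;
+
+       adeos_hw_local_irq_save(flags);
+       ... MSR:EE is now clear; safe to touch per-CPU pipeline data ...
+       adeos_hw_local_irq_restore(flags);
+*/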
+
+#define adeos_hw_tsc(t) (t = mftb())
+
+extern unsigned long tb_ticks_per_jiffy;
+
+#define adeos_cpu_freq() (HZ * tb_ticks_per_jiffy)
+
+#define adeos_spin_lock(x) _raw_spin_lock(x)
+#define adeos_spin_unlock(x) _raw_spin_unlock(x)
+#define adeos_spin_trylock(x) _raw_spin_trylock(x)
+#define adeos_write_lock(x) _raw_write_lock(x)
+#define adeos_write_unlock(x) _raw_write_unlock(x)
+#define adeos_write_trylock(x) _raw_write_trylock(x)
+#define adeos_read_lock(x) _raw_read_lock(x)
+#define adeos_read_unlock(x) _raw_read_unlock(x)
+
+#define adeos_spin_lock_irqsave(x,flags) \
+do { \
+ adeos_hw_local_irq_save(flags); \
+ adeos_spin_lock(x); \
+} while (0)
+
+#define adeos_spin_unlock_irqrestore(x,flags) \
+do { \
+ adeos_spin_unlock(x); \
+ adeos_hw_local_irq_restore(flags); \
+} while (0)
+
+#define adeos_spin_lock_disable(x) \
+do { \
+ adeos_hw_cli(); \
+ adeos_spin_lock(x); \
+} while (0)
+
+#define adeos_spin_unlock_enable(x) \
+do { \
+ adeos_spin_unlock(x); \
+ adeos_hw_sti(); \
+} while (0)
+
+#define adeos_read_lock_irqsave(lock, flags) \
+do { \
+ adeos_hw_local_irq_save(flags); \
+ adeos_read_lock(lock); \
+} while (0)
+
+#define adeos_read_unlock_irqrestore(lock, flags) \
+do { \
+ adeos_read_unlock(lock); \
+ adeos_hw_local_irq_restore(flags); \
+} while (0)
+
+#define adeos_write_lock_irqsave(lock, flags) \
+do { \
+ adeos_hw_local_irq_save(flags); \
+ adeos_write_lock(lock); \
+} while (0)
+
+#define adeos_write_unlock_irqrestore(lock, flags) \
+do { \
+ adeos_write_unlock(lock); \
+ adeos_hw_local_irq_restore(flags); \
+} while (0)
+
+/* Private interface -- Internal use only */
+
+struct adattr;
+
+void __adeos_init(void);
+
+void __adeos_init_domain(adomain_t *adp,
+ struct adattr *attr);
+
+void __adeos_cleanup_domain(adomain_t *adp);
+
+#define __adeos_check_platform() do { } while(0)
+
+#define __adeos_read_timebase() ({ unsigned long t; adeos_hw_tsc(t); t; })
+
+void __adeos_init_platform(void);
+
+void __adeos_enable_pipeline(void);
+
+void __adeos_disable_pipeline(void);
+
+void __adeos_init_stage(adomain_t *adp);
+
+void __adeos_sync_stage(unsigned long syncmask);
+
+int __adeos_ack_irq(unsigned irq);
+
+void __adeos_do_IRQ(int irq,
+ struct pt_regs *regs);
+
+void __adeos_do_timer(int irq,
+ struct pt_regs *regs);
+
+struct thread_info *__adeos_current_threadinfo(void);
+
+#ifdef CONFIG_ADEOS_THREADS
+
+int __adeos_switch_domain(adomain_t *adp,
+ adomain_t **currentp);
+
+/* Called with hw interrupts off. */
+static inline void __adeos_switch_to (adomain_t *out,
+ adomain_t *in,
+ int cpuid)
+{
+ extern adomain_t *adp_cpu_current[];
+
+ __adeos_switch_domain(in,&adp_cpu_current[cpuid]);
+
+ if (out->dswitch != NULL)
+ out->dswitch();
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+extern struct pt_regs __adeos_irq_regs;
+
+extern unsigned long __adeos_virtual_irq_map;
+
+extern unsigned long __adeos_decr_ticks;
+
+extern unsigned long __adeos_decr_next[];
+
+#endif /* !__PPC64_ADEOS_H */
diff -Nru linux-2.6.10/include/asm-ppc64/hw_irq.h linux-2.6.10-adeos-ppc64-devel/include/asm-ppc64/hw_irq.h
--- linux-2.6.10/include/asm-ppc64/hw_irq.h 2004-12-24 23:35:40.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/include/asm-ppc64/hw_irq.h 2005-05-12 13:36:51.000000000 +0300
@@ -19,6 +19,37 @@
int timer_interrupt(struct pt_regs *);
extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
+#ifdef CONFIG_ADEOS_CORE
+
+void __adeos_stall_root(void);
+void __adeos_unstall_root(void);
+unsigned long __adeos_test_root(void);
+unsigned long __adeos_test_and_stall_root(void);
+void __adeos_restore_root(unsigned long flags);
+
+#define irqs_disabled() __adeos_test_root()
+
+static inline void local_irq_disable(void) {
+ __adeos_stall_root();
+}
+
+static inline void local_irq_enable(void) {
+ __adeos_unstall_root();
+}
+
+static inline void local_irq_save_ptr(unsigned long *flags) {
+ *flags = __adeos_test_and_stall_root();
+}
+
+static inline void local_irq_restore(unsigned long flags) {
+ __adeos_restore_root(flags);
+}
+
+#define local_save_flags(flags) ((flags) = __adeos_test_root())
+#define local_irq_save(flags) local_irq_save_ptr(&flags)
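+
+/* Note (descriptive): these replacements only toggle the root stage's
+   stall bit; MSR:EE itself is left untouched, so higher priority
+   domains keep receiving interrupts across local_irq_save() sections. */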
+
+#else /* !CONFIG_ADEOS_CORE */
+
#ifdef CONFIG_PPC_ISERIES
extern unsigned long local_get_flags(void);
@@ -75,6 +106,8 @@
#endif /* CONFIG_PPC_ISERIES */
+#endif /* CONFIG_ADEOS_CORE */
+
#define mask_irq(irq) \
({ \
irq_desc_t *desc = get_irq_desc(irq); \
diff -Nru linux-2.6.10/include/asm-ppc64/smp.h linux-2.6.10-adeos-ppc64-devel/include/asm-ppc64/smp.h
--- linux-2.6.10/include/asm-ppc64/smp.h 2004-12-24 23:33:47.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/include/asm-ppc64/smp.h 2005-05-12 13:36:51.000000000 +0300
@@ -37,8 +37,12 @@
struct pt_regs;
extern void smp_message_recv(int, struct pt_regs *);
-
+#ifdef CONFIG_ADEOS_CORE
+#include <asm/adeos.h>
+#define smp_processor_id() adeos_processor_id()
+#else /* !CONFIG_ADEOS_CORE */
#define smp_processor_id() (get_paca()->paca_index)
+#endif /* CONFIG_ADEOS_CORE */
#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
extern cpumask_t cpu_sibling_map[NR_CPUS];
diff -Nru linux-2.6.10/include/linux/adeos.h linux-2.6.10-adeos-ppc64-devel/include/linux/adeos.h
--- linux-2.6.10/include/linux/adeos.h 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/include/linux/adeos.h 2005-05-12 13:36:51.000000000 +0300
@@ -0,0 +1,518 @@
+/*
+ * include/linux/adeos.h
+ *
+ * Copyright (C) 2002,2003,2004 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_ADEOS_H
+#define __LINUX_ADEOS_H
+
+#include <asm/adeos.h>
+#include <linux/spinlock.h>
+
+#define ADEOS_VERSION_PREFIX "2.6"
+#define ADEOS_VERSION_STRING (ADEOS_VERSION_PREFIX ADEOS_ARCH_STRING)
+#define ADEOS_RELEASE_NUMBER (0x02060000|((ADEOS_MAJOR_NUMBER&0xff)<<8)|(ADEOS_MINOR_NUMBER&0xff))
+
+#define ADEOS_ROOT_PRI 100
+#define ADEOS_ROOT_ID 0
+#define ADEOS_ROOT_NPTDKEYS 4 /* Must be <= 32 */
+
+#define ADEOS_RESET_TIMER 0x1
+#define ADEOS_SAME_HANDLER ((void (*)(unsigned))(-1))
+
+/* Global domain flags */
+#define ADEOS_SPRINTK_FLAG 0 /* Synchronous printk() allowed */
+#define ADEOS_PPRINTK_FLAG 1 /* Asynchronous printk() request pending */
+
+/* Per-cpu pipeline flags.
+   WARNING: some implementations might refer to these flags
+   non-symbolically in assembly portions (e.g. x86). */
+#define IPIPE_STALL_FLAG 0 /* Stalls a pipeline stage */
+#define IPIPE_XPEND_FLAG 1 /* Exception notification is pending */
+#define IPIPE_SLEEP_FLAG 2 /* Domain has self-suspended */
+#define IPIPE_SYNC_FLAG 3 /* The interrupt syncer is running for the domain */
+
+#define IPIPE_HANDLE_FLAG 0
+#define IPIPE_PASS_FLAG 1
+#define IPIPE_ENABLE_FLAG 2
+#define IPIPE_DYNAMIC_FLAG IPIPE_HANDLE_FLAG
+#define IPIPE_EXCLUSIVE_FLAG 3
+#define IPIPE_STICKY_FLAG 4
+#define IPIPE_SYSTEM_FLAG 5
+#define IPIPE_LOCK_FLAG 6
+#define IPIPE_SHARED_FLAG 7
+#define IPIPE_CALLASM_FLAG 8 /* Arch-dependent -- might be unused. */
+
+#define IPIPE_HANDLE_MASK (1 << IPIPE_HANDLE_FLAG)
+#define IPIPE_PASS_MASK (1 << IPIPE_PASS_FLAG)
+#define IPIPE_ENABLE_MASK (1 << IPIPE_ENABLE_FLAG)
+#define IPIPE_DYNAMIC_MASK IPIPE_HANDLE_MASK
+#define IPIPE_EXCLUSIVE_MASK (1 << IPIPE_EXCLUSIVE_FLAG)
+#define IPIPE_STICKY_MASK (1 << IPIPE_STICKY_FLAG)
+#define IPIPE_SYSTEM_MASK (1 << IPIPE_SYSTEM_FLAG)
+#define IPIPE_LOCK_MASK (1 << IPIPE_LOCK_FLAG)
+#define IPIPE_SHARED_MASK (1 << IPIPE_SHARED_FLAG)
+#define IPIPE_SYNC_MASK (1 << IPIPE_SYNC_FLAG)
+#define IPIPE_CALLASM_MASK (1 << IPIPE_CALLASM_FLAG)
+
+#define IPIPE_DEFAULT_MASK (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK)
+
+typedef struct adattr {
+
+    unsigned domid;             /* Domain identifier -- Magic value set by caller */
+    const char *name;           /* Domain name -- Warning: won't be dup'ed! */
+    int priority;               /* Priority in interrupt pipeline */
+    void (*entry)(int);         /* Domain entry point */
+    int estacksz;               /* Stack size for entry context -- 0 means unspec */
+    void (*dswitch)(void);      /* Handler called each time the domain is switched in */
+    int nptdkeys;               /* Max. number of per-thread data keys */
+    void (*ptdset)(int,void *); /* Routine to set pt values */
+    void *(*ptdget)(int);       /* Routine to get pt values */
+
+} adattr_t;
+
+typedef struct admutex {
+
+ spinlock_t lock;
+
+#ifdef CONFIG_ADEOS_THREADS
+ adomain_t *sleepq, /* Pending domain queue */
+ *owner; /* Domain owning the mutex */
+#ifdef CONFIG_SMP
+ volatile int owncpu;
+#define ADEOS_MUTEX_UNLOCKED { SPIN_LOCK_UNLOCKED, NULL, NULL, -1 }
+#else /* !CONFIG_SMP */
+#define ADEOS_MUTEX_UNLOCKED { SPIN_LOCK_UNLOCKED, NULL, NULL }
+#endif /* CONFIG_SMP */
+#else /* !CONFIG_ADEOS_THREADS */
+#define ADEOS_MUTEX_UNLOCKED { SPIN_LOCK_UNLOCKED }
+#endif /* CONFIG_ADEOS_THREADS */
+
+} admutex_t;
+
+extern int adp_pipelined;
+
+extern adomain_t *adp_cpu_current[],
+ *adp_root;
+
+extern int __adeos_event_monitors[];
+
+extern unsigned __adeos_printk_virq;
+
+extern unsigned long __adeos_virtual_irq_map;
+
+extern struct list_head __adeos_pipeline;
+
+extern spinlock_t __adeos_pipelock;
+
+#ifdef CONFIG_ADEOS_PROFILING
+
+typedef struct adprofdata {
+
+ struct {
+ unsigned long long t_handled;
+ unsigned long long t_synced;
+ unsigned long n_handled;
+ unsigned long n_synced;
+ } irqs[IPIPE_NR_IRQS];
+
+} adprofdata_t;
+
+extern adprofdata_t __adeos_profile_data[ADEOS_NR_CPUS];
+
+#endif /* CONFIG_ADEOS_PROFILING */
+
+/* Private interface */
+
+#ifdef CONFIG_PROC_FS
+void __adeos_init_proc(void);
+#endif /* CONFIG_PROC_FS */
+
+void __adeos_takeover(void);
+
+asmlinkage int __adeos_handle_event(unsigned event,
+ void *evdata);
+
+void __adeos_sync_console(unsigned irq);
+
+void __adeos_dump_state(void);
+
+static inline void __adeos_schedule_head(void *evdata) {
+
+ if (__adeos_event_monitors[ADEOS_SCHEDULE_HEAD] > 0)
+ __adeos_handle_event(ADEOS_SCHEDULE_HEAD,evdata);
+}
+
+static inline int __adeos_schedule_tail(void *evdata) {
+
+ if (__adeos_event_monitors[ADEOS_SCHEDULE_TAIL] > 0)
+ return __adeos_handle_event(ADEOS_SCHEDULE_TAIL,evdata);
+
+ return 0;
+}
+
+static inline void __adeos_enter_process(void) {
+
+ if (__adeos_event_monitors[ADEOS_ENTER_PROCESS] > 0)
+ __adeos_handle_event(ADEOS_ENTER_PROCESS,NULL);
+}
+
+static inline void __adeos_exit_process(void *evdata) {
+
+ if (__adeos_event_monitors[ADEOS_EXIT_PROCESS] > 0)
+ __adeos_handle_event(ADEOS_EXIT_PROCESS,evdata);
+}
+
+static inline int __adeos_signal_process(void *evdata) {
+
+ if (__adeos_event_monitors[ADEOS_SIGNAL_PROCESS] > 0)
+ return __adeos_handle_event(ADEOS_SIGNAL_PROCESS,evdata);
+
+ return 0;
+}
+
+static inline void __adeos_kick_process(void *evdata) {
+
+ if (__adeos_event_monitors[ADEOS_KICK_PROCESS] > 0)
+ __adeos_handle_event(ADEOS_KICK_PROCESS,evdata);
+}
+
+static inline int __adeos_renice_process(void *evdata) {
+
+ if (__adeos_event_monitors[ADEOS_RENICE_PROCESS] > 0)
+ return __adeos_handle_event(ADEOS_RENICE_PROCESS,evdata);
+
+ return 0;
+}
+
+void __adeos_stall_root(void);
+
+void __adeos_unstall_root(void);
+
+unsigned long __adeos_test_root(void);
+
+unsigned long __adeos_test_and_stall_root(void);
+
+void fastcall __adeos_restore_root(unsigned long flags);
+
+void __adeos_schedule_back_root(struct task_struct *prev);
+
+void __adeos_setscheduler_root(struct task_struct *p,
+ int policy,
+ int prio);
+
+void __adeos_reenter_root(struct task_struct *prev,
+ int policy,
+ int prio);
+
+int fastcall __adeos_schedule_irq(unsigned irq,
+ struct list_head *head);
+
+#define __adeos_pipeline_head_p(adp) (&(adp)->p_link == __adeos_pipeline.next)
+
+#ifdef CONFIG_ADEOS_THREADS
+
+static inline int __adeos_domain_work_p (adomain_t *adp, int cpuid)
+
+{
+ return (!test_bit(IPIPE_SLEEP_FLAG,&adp->cpudata[cpuid].status) ||
+ (!test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status) &&
+ adp->cpudata[cpuid].irq_pending_hi != 0) ||
+ test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[cpuid].status));
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+static inline int __adeos_domain_work_p (adomain_t *adp, int cpuid)
+
+{
+ return (!test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status) &&
+ adp->cpudata[cpuid].irq_pending_hi != 0);
+}
+
+static inline void __adeos_switch_to (adomain_t *out, adomain_t *in, int cpuid)
+
+{
+ void adeos_suspend_domain(void);
+
+ /* "in" is guaranteed to be closer than "out" from the head of the
+ pipeline (and obviously different). */
+
+ adp_cpu_current[cpuid] = in;
+
+ if (in->dswitch)
+ in->dswitch();
+
+ adeos_suspend_domain(); /* Sync stage and propagate interrupts. */
+ adeos_load_cpuid(); /* Processor might have changed. */
+
+ if (adp_cpu_current[cpuid] == in)
+ /* Otherwise, something has changed the current domain under
+ our feet recycling the register set; do not override. */
+ adp_cpu_current[cpuid] = out;
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+/* Public interface */
+
+int adeos_register_domain(adomain_t *adp,
+ adattr_t *attr);
+
+int adeos_unregister_domain(adomain_t *adp);
+
+void adeos_suspend_domain(void);
+
+int adeos_virtualize_irq_from(adomain_t *adp,
+ unsigned irq,
+ void (*handler)(unsigned irq),
+ int (*acknowledge)(unsigned irq),
+ unsigned modemask);
+
+static inline int adeos_virtualize_irq(unsigned irq,
+ void (*handler)(unsigned irq),
+ int (*acknowledge)(unsigned irq),
+ unsigned modemask) {
+
+ return adeos_virtualize_irq_from(adp_current,
+ irq,
+ handler,
+ acknowledge,
+ modemask);
+}
+
+int adeos_control_irq(unsigned irq,
+ unsigned clrmask,
+ unsigned setmask);
+
+cpumask_t adeos_set_irq_affinity(unsigned irq,
+ cpumask_t cpumask);
+
+static inline int adeos_share_irq (unsigned irq, int (*acknowledge)(unsigned irq)) {
+
+    return adeos_virtualize_irq(irq,
+                                ADEOS_SAME_HANDLER,
+                                acknowledge,
+                                IPIPE_SHARED_MASK|IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+}
+
+unsigned adeos_alloc_irq(void);
+
+int adeos_free_irq(unsigned irq);
+
+int fastcall adeos_trigger_irq(unsigned irq);
+
+static inline int adeos_propagate_irq(unsigned irq) {
+
+ return __adeos_schedule_irq(irq,adp_current->p_link.next);
+}
+
+static inline int adeos_schedule_irq(unsigned irq) {
+
+ return __adeos_schedule_irq(irq,&adp_current->p_link);
+}
+
+int fastcall adeos_send_ipi(unsigned ipi,
+ cpumask_t cpumask);
+
+static inline void adeos_stall_pipeline_from (adomain_t *adp)
+
+{
+ adeos_declare_cpuid;
+#ifdef CONFIG_SMP
+ unsigned long flags;
+
+ adeos_lock_cpu(flags);
+
+ __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+ if (!__adeos_pipeline_head_p(adp))
+ adeos_unlock_cpu(flags);
+#else /* CONFIG_SMP */
+ set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+ if (__adeos_pipeline_head_p(adp))
+ adeos_hw_cli();
+#endif /* CONFIG_SMP */
+}
+
+static inline unsigned long adeos_test_pipeline_from (adomain_t *adp)
+
+{
+ unsigned long flags, s;
+ adeos_declare_cpuid;
+
+ adeos_get_cpu(flags);
+ s = test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+ adeos_put_cpu(flags);
+
+ return s;
+}
+
+static inline unsigned long adeos_test_and_stall_pipeline_from (adomain_t *adp)
+
+{
+ adeos_declare_cpuid;
+ unsigned long s;
+#ifdef CONFIG_SMP
+ unsigned long flags;
+
+ adeos_lock_cpu(flags);
+
+ s = __test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+ if (!__adeos_pipeline_head_p(adp))
+ adeos_unlock_cpu(flags);
+#else /* CONFIG_SMP */
+ s = test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+ if (__adeos_pipeline_head_p(adp))
+ adeos_hw_cli();
+#endif /* CONFIG_SMP */
+
+ return s;
+}
+
+void fastcall adeos_unstall_pipeline_from(adomain_t *adp);
+
+static inline unsigned long adeos_test_and_unstall_pipeline_from(adomain_t
*adp)
+
+{
+ unsigned long flags, s;
+ adeos_declare_cpuid;
+
+ adeos_get_cpu(flags);
+ s = test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+ adeos_unstall_pipeline_from(adp);
+ adeos_put_cpu(flags);
+
+ return s;
+}
+
+static inline void adeos_unstall_pipeline(void)
+
+{
+ adeos_unstall_pipeline_from(adp_current);
+}
+
+static inline unsigned long adeos_test_and_unstall_pipeline(void)
+
+{
+ return adeos_test_and_unstall_pipeline_from(adp_current);
+}
+
+static inline unsigned long adeos_test_pipeline (void)
+
+{
+ return adeos_test_pipeline_from(adp_current);
+}
+
+static inline unsigned long adeos_test_and_stall_pipeline (void)
+
+{
+ return adeos_test_and_stall_pipeline_from(adp_current);
+}
+
+static inline void adeos_restore_pipeline_from (adomain_t *adp, unsigned long flags)
+
+{
+ if (flags)
+ adeos_stall_pipeline_from(adp);
+ else
+ adeos_unstall_pipeline_from(adp);
+}
+
+static inline void adeos_stall_pipeline (void)
+
+{
+ adeos_stall_pipeline_from(adp_current);
+}
+
+static inline void adeos_restore_pipeline (unsigned long flags)
+
+{
+ adeos_restore_pipeline_from(adp_current,flags);
+}
+
+static inline void adeos_restore_pipeline_nosync (adomain_t *adp, unsigned long flags, int cpuid)
+
+{
+ /* If cpuid is current, then it must be held on entry
+ (adeos_get_cpu/adeos_hw_local_irq_save/adeos_hw_cli). */
+
+ if (flags)
+ __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+ else
+ __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+}
+
+int adeos_catch_event_from(adomain_t *adp,
+ unsigned event,
+ void (*handler)(adevinfo_t *));
+
+static inline int adeos_catch_event (unsigned event, void (*handler)(adevinfo_t *))
+
+{
+ return adeos_catch_event_from(adp_current,event,handler);
+}
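+
+/* Sketch (hypothetical handler name): trapping a kernel event from the
+   current domain; evinfo->evdata is event-specific.
+
+       static void on_event (adevinfo_t *evinfo)
+       {
+           ...
+           adeos_propagate_event(evinfo); (let lower domains see it too)
+       }
+
+       adeos_catch_event(ADEOS_SCHEDULE_TAIL,&on_event);
+*/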
+
+static inline void adeos_propagate_event(adevinfo_t *evinfo)
+
+{
+ evinfo->propagate = 1;
+}
+
+void adeos_init_attr(adattr_t *attr);
+
+int adeos_get_sysinfo(adsysinfo_t *sysinfo);
+
+int adeos_tune_timer(unsigned long ns,
+ int flags);
+
+int adeos_alloc_ptdkey(void);
+
+int adeos_free_ptdkey(int key);
+
+int adeos_set_ptd(int key,
+ void *value);
+
+void *adeos_get_ptd(int key);
+
+unsigned long adeos_critical_enter(void (*syncfn)(void));
+
+void adeos_critical_exit(unsigned long flags);
+
+int adeos_init_mutex(admutex_t *mutex);
+
+int adeos_destroy_mutex(admutex_t *mutex);
+
+unsigned long fastcall adeos_lock_mutex(admutex_t *mutex);
+
+void fastcall adeos_unlock_mutex(admutex_t *mutex,
+ unsigned long flags);
+
+static inline void adeos_set_printk_sync (adomain_t *adp) {
+ set_bit(ADEOS_SPRINTK_FLAG,&adp->flags);
+}
+
+static inline void adeos_set_printk_async (adomain_t *adp) {
+ clear_bit(ADEOS_SPRINTK_FLAG,&adp->flags);
+}
+
+#endif /* !__LINUX_ADEOS_H */
diff -Nru linux-2.6.10/include/linux/init_task.h linux-2.6.10-adeos-ppc64-devel/include/linux/init_task.h
--- linux-2.6.10/include/linux/init_task.h 2004-12-24 23:33:52.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/include/linux/init_task.h 2005-05-12 13:36:51.000000000 +0300
@@ -65,6 +65,60 @@
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
+#ifdef CONFIG_ADEOS_CORE
+
+#define INIT_TASK(tsk) \
+{ \
+ .state = 0, \
+ .thread_info = &init_thread_info, \
+ .usage = ATOMIC_INIT(2), \
+ .flags = 0, \
+ .lock_depth = -1, \
+ .prio = MAX_PRIO-20, \
+ .static_prio = MAX_PRIO-20, \
+ .policy = SCHED_NORMAL, \
+ .cpus_allowed = CPU_MASK_ALL, \
+ .mm = NULL, \
+ .active_mm = &init_mm, \
+ .run_list = LIST_HEAD_INIT(tsk.run_list), \
+ .time_slice = HZ, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
+ .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \
+ .real_parent = &tsk, \
+ .parent = &tsk, \
+ .children = LIST_HEAD_INIT(tsk.children), \
+ .sibling = LIST_HEAD_INIT(tsk.sibling), \
+ .group_leader = &tsk, \
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\
+ .real_timer = { \
+ .function = it_real_fn \
+ }, \
+ .group_info = &init_groups, \
+ .cap_effective = CAP_INIT_EFF_SET, \
+ .cap_inheritable = CAP_INIT_INH_SET, \
+ .cap_permitted = CAP_FULL_SET, \
+ .keep_capabilities = 0, \
+ .user = INIT_USER, \
+ .comm = "swapper", \
+ .thread = INIT_THREAD, \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+ .sighand = &init_sighand, \
+ .pending = { \
+ .list = LIST_HEAD_INIT(tsk.pending.list), \
+ .signal = {{0}}}, \
+ .blocked = {{0}}, \
+ .alloc_lock = SPIN_LOCK_UNLOCKED, \
+ .proc_lock = SPIN_LOCK_UNLOCKED, \
+ .switch_lock = SPIN_LOCK_UNLOCKED, \
+ .journal_info = NULL, \
+ .ptd = { [ 0 ... ADEOS_ROOT_NPTDKEYS - 1] = 0 } \
+}
+
+#else /* !CONFIG_ADEOS_CORE */
+
#define INIT_TASK(tsk) \
{ \
.state = 0, \
@@ -114,6 +168,7 @@
.journal_info = NULL, \
}
+#endif /* CONFIG_ADEOS_CORE */
#endif
diff -Nru linux-2.6.10/include/linux/preempt.h linux-2.6.10-adeos-ppc64-devel/include/linux/preempt.h
--- linux-2.6.10/include/linux/preempt.h 2004-12-24 23:34:26.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/include/linux/preempt.h 2005-05-12 13:36:51.000000000 +0300
@@ -25,6 +25,47 @@
asmlinkage void preempt_schedule(void);
+#ifdef CONFIG_ADEOS_CORE
+
+#include <asm/adeos.h>
+
+extern adomain_t *adp_cpu_current[],
+ *adp_root;
+
+#define preempt_disable() \
+do { \
+ if (adp_current == adp_root) { \
+ inc_preempt_count(); \
+ barrier(); \
+ } \
+} while (0)
+
+#define preempt_enable_no_resched() \
+do { \
+ if (adp_current == adp_root) { \
+ barrier(); \
+ dec_preempt_count(); \
+ } \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+ if (adp_current == adp_root) { \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ preempt_schedule(); \
+ } \
+} while (0)
+
+#define preempt_enable() \
+do { \
+ if (adp_current == adp_root) { \
+ preempt_enable_no_resched(); \
+ preempt_check_resched(); \
+ } \
+} while (0)
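+
+/* Note (descriptive): over a non-root domain these expand to no-ops --
+   the preempt count only guards the root (Linux) stage. */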
+
+#else /* !CONFIG_ADEOS_CORE */
+
#define preempt_disable() \
do { \
inc_preempt_count(); \
@@ -49,6 +90,8 @@
preempt_check_resched(); \
} while (0)
+#endif /* CONFIG_ADEOS_CORE */
+
#else
#define preempt_disable() do { } while (0)
diff -Nru linux-2.6.10/include/linux/sched.h linux-2.6.10-adeos-ppc64-devel/include/linux/sched.h
--- linux-2.6.10/include/linux/sched.h 2004-12-24 23:33:59.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/include/linux/sched.h 2005-05-12 13:36:51.000000000 +0300
@@ -4,6 +4,9 @@
#include <asm/param.h> /* for HZ */
#include <linux/config.h>
+#ifdef CONFIG_ADEOS_CORE
+#include <linux/adeos.h>
+#endif /* CONFIG_ADEOS_CORE */
#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
@@ -664,6 +667,10 @@
struct mempolicy *mempolicy;
short il_next; /* could be shared with used_math */
#endif
+
+#ifdef CONFIG_ADEOS_CORE
+ void *ptd[ADEOS_ROOT_NPTDKEYS];
+#endif /* CONFIG_ADEOS_CORE */
};
static inline pid_t process_group(struct task_struct *tsk)
diff -Nru linux-2.6.10/init/main.c linux-2.6.10-adeos-ppc64-devel/init/main.c
--- linux-2.6.10/init/main.c 2004-12-24 23:34:01.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/init/main.c 2005-05-12 13:36:51.000000000 +0300
@@ -526,6 +526,11 @@
init_timers();
softirq_init();
time_init();
+#ifdef CONFIG_ADEOS_CORE
+ /* On PPC, we need calibrated values for the decrementer to
+ initialize, so run time_init() first. */
+ __adeos_init();
+#endif /* CONFIG_ADEOS_CORE */
/*
* HACK ALERT! This is early. We're enabling the console before
@@ -652,6 +657,11 @@
sock_init();
do_initcalls();
+
+#ifdef CONFIG_ADEOS
+ /* i.e. Permanent pipelining from boot onwards. */
+ __adeos_takeover();
+#endif /* CONFIG_ADEOS */
}
static void do_pre_smp_initcalls(void)
diff -Nru linux-2.6.10/kernel/adeos.c linux-2.6.10-adeos-ppc64-devel/kernel/adeos.c
--- linux-2.6.10/kernel/adeos.c 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/kernel/adeos.c 2005-05-12 13:36:51.000000000 +0300
@@ -0,0 +1,800 @@
+/*
+ * linux/kernel/adeos.c
+ *
+ * Copyright (C) 2002,2003,2004 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-independent ADEOS core support.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif /* CONFIG_PROC_FS */
+
+/* The pre-defined domain slot for the root domain. */
+static adomain_t adeos_root_domain;
+
+/* A constant pointer to the root domain. */
+adomain_t *adp_root = &adeos_root_domain;
+
+/* A pointer to the current domain. */
+adomain_t *adp_cpu_current[ADEOS_NR_CPUS] = { [ 0 ... ADEOS_NR_CPUS - 1] = &adeos_root_domain };
+
+/* The spinlock protecting from races while modifying the pipeline. */
+spinlock_t __adeos_pipelock = SPIN_LOCK_UNLOCKED;
+
+/* The pipeline data structure. Enqueues adomain_t objects by priority. */
+struct list_head __adeos_pipeline;
+
+/* A global flag telling whether Adeos pipelining is engaged. */
+int adp_pipelined;
+
+/* An array of global counters tracking domains monitoring events. */
+int __adeos_event_monitors[ADEOS_NR_EVENTS] = { [ 0 ... ADEOS_NR_EVENTS - 1] = 0 };
+
+/* The allocated VIRQ map. */
+unsigned long __adeos_virtual_irq_map = 0;
+
+/* A VIRQ to kick printk() output out when the root domain is in control. */
+unsigned __adeos_printk_virq;
+
+#ifdef CONFIG_ADEOS_PROFILING
+adprofdata_t __adeos_profile_data[ADEOS_NR_CPUS];
+#endif /* CONFIG_ADEOS_PROFILING */
+
+static void __adeos_set_root_ptd (int key, void *value) {
+
+ current->ptd[key] = value;
+}
+
+static void *__adeos_get_root_ptd (int key) {
+
+ return current->ptd[key];
+}
+
+/* adeos_init() -- Initialization routine of the ADEOS layer. Called
+ by the host kernel early during the boot procedure. */
+
+void __adeos_init (void)
+
+{
+ adomain_t *adp = &adeos_root_domain;
+
+ __adeos_check_platform(); /* Do platform dependent checks first. */
+
+ /*
+ A lightweight registration code for the root domain. Current
+ assumptions are:
+ - We are running on the boot CPU, and secondary CPUs are still
+ lost in space.
+ - adeos_root_domain has been zero'ed.
+ */
+
+ INIT_LIST_HEAD(&__adeos_pipeline);
+
+ adp->name = "Linux";
+ adp->domid = ADEOS_ROOT_ID;
+ adp->priority = ADEOS_ROOT_PRI;
+ adp->ptd_setfun = &__adeos_set_root_ptd;
+ adp->ptd_getfun = &__adeos_get_root_ptd;
+ adp->ptd_keymax = ADEOS_ROOT_NPTDKEYS;
+
+ __adeos_init_stage(adp);
+
+ INIT_LIST_HEAD(&adp->p_link);
+ list_add_tail(&adp->p_link,&__adeos_pipeline);
+
+ __adeos_init_platform();
+
+ __adeos_printk_virq = adeos_alloc_irq(); /* Cannot fail here. */
+ adp->irqs[__adeos_printk_virq].handler = &__adeos_sync_console;
+ adp->irqs[__adeos_printk_virq].acknowledge = NULL;
+ adp->irqs[__adeos_printk_virq].control = IPIPE_HANDLE_MASK;
+
+ printk(KERN_INFO "Adeos %s: Root domain %s registered.\n",
+ ADEOS_VERSION_STRING,
+ adp->name);
+}
+
+/* adeos_handle_event() -- Adeos' generic event handler. This routine
+ calls the per-domain handlers registered for a given
+ exception/event. Each domain before the one which raised the event
+ in the pipeline will get a chance to process the event. The latter
+ will eventually be allowed to process its own event too if a valid
+ handler exists for it. Handler executions are always scheduled by
+ the domain which raised the event for the higher priority domains
+ wanting to be notified of such event. Note: evdata might be
+ NULL. */
+
+#ifdef CONFIG_ADEOS_THREADS
+
+asmlinkage int __adeos_handle_event (unsigned event, void *evdata)
+/* asmlinkage is there just in case CONFIG_REGPARM is enabled... */
+{
+ struct list_head *pos, *npos;
+ adomain_t *this_domain;
+ unsigned long flags;
+ adeos_declare_cpuid;
+ adevinfo_t evinfo;
+ int propagate = 1;
+
+ adeos_lock_cpu(flags);
+
+ this_domain = adp_cpu_current[cpuid];
+
+ list_for_each_safe(pos,npos,&__adeos_pipeline) {
+
+ adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
+
+ if (next_domain->events[event].handler != NULL)
+ {
+ if (next_domain == this_domain)
+ {
+ adeos_unlock_cpu(flags);
+ evinfo.domid = this_domain->domid;
+ evinfo.event = event;
+ evinfo.evdata = evdata;
+ evinfo.propagate = 0;
+ this_domain->events[event].handler(&evinfo);
+ propagate = evinfo.propagate;
+ goto done;
+ }
+
+ next_domain->cpudata[cpuid].event_info.domid = this_domain->domid;
+ next_domain->cpudata[cpuid].event_info.event = event;
+ next_domain->cpudata[cpuid].event_info.evdata = evdata;
+ next_domain->cpudata[cpuid].event_info.propagate = 0;
+ __set_bit(IPIPE_XPEND_FLAG,&next_domain->cpudata[cpuid].status);
+
+ /* Let the higher priority domain process the event. */
+ __adeos_switch_to(this_domain,next_domain,cpuid);
+
+ adeos_load_cpuid(); /* Processor might have changed. */
+
+ if (!next_domain->cpudata[cpuid].event_info.propagate)
+ {
+ propagate = 0;
+ break;
+ }
+ }
+
+ if (next_domain == this_domain)
+ break;
+ }
+
+ adeos_unlock_cpu(flags);
+
+ done:
+
+ return !propagate;
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+asmlinkage int __adeos_handle_event (unsigned event, void *evdata)
+/* asmlinkage is there just in case CONFIG_REGPARM is enabled... */
+{
+ adomain_t *start_domain, *this_domain, *next_domain;
+ struct list_head *pos, *npos;
+ unsigned long flags;
+ adeos_declare_cpuid;
+ adevinfo_t evinfo;
+ int propagate = 1;
+
+ adeos_lock_cpu(flags);
+
+ start_domain = this_domain = adp_cpu_current[cpuid];
+
+ list_for_each_safe(pos,npos,&__adeos_pipeline) {
+
+ next_domain = list_entry(pos,adomain_t,p_link);
+
+ if (next_domain->events[event].handler != NULL)
+ {
+ adp_cpu_current[cpuid] = next_domain;
+ evinfo.domid = start_domain->domid;
+ adeos_unlock_cpu(flags);
+ evinfo.event = event;
+ evinfo.evdata = evdata;
+ evinfo.propagate = 0;
+ next_domain->events[event].handler(&evinfo);
+ adeos_lock_cpu(flags);
+
+ if (adp_cpu_current[cpuid] != next_domain)
+ /* Something has changed the current domain under our
+ feet recycling the register set; take note. */
+ this_domain = adp_cpu_current[cpuid];
+
+ propagate = evinfo.propagate;
+ }
+
+ if (next_domain == this_domain || !propagate)
+ break;
+ }
+
+ adp_cpu_current[cpuid] = this_domain;
+
+ adeos_unlock_cpu(flags);
+
+ return !propagate;
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+void __adeos_stall_root (void)
+
+{
+ if (adp_pipelined)
+ {
+ adeos_declare_cpuid;
+
+#ifdef CONFIG_SMP
+ unsigned long flags;
+ adeos_lock_cpu(flags);
+ __set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+ adeos_unlock_cpu(flags);
+#else /* !CONFIG_SMP */
+ set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+#endif /* CONFIG_SMP */
+ }
+ else
+ adeos_hw_cli();
+}
+
+void __adeos_unstall_root (void)
+
+{
+ if (adp_pipelined)
+ {
+ adeos_declare_cpuid;
+
+ adeos_hw_cli();
+
+ adeos_load_cpuid();
+
+ __clear_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+
+ if (adp_root->cpudata[cpuid].irq_pending_hi != 0)
+ __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+ }
+
+ adeos_hw_sti(); /* Needed in both cases. */
+}
+
+unsigned long __adeos_test_root (void)
+
+{
+ if (adp_pipelined)
+ {
+ adeos_declare_cpuid;
+ unsigned long s;
+
+#ifdef CONFIG_SMP
+ unsigned long flags;
+ adeos_lock_cpu(flags);
+ s = test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+ adeos_unlock_cpu(flags);
+#else /* !CONFIG_SMP */
+ s = test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+#endif /* CONFIG_SMP */
+
+ return s;
+ }
+
+ return adeos_hw_irqs_disabled();
+}
+
+unsigned long __adeos_test_and_stall_root (void)
+
+{
+ unsigned long flags;
+
+ if (adp_pipelined)
+ {
+ adeos_declare_cpuid;
+ unsigned long s;
+
+#ifdef CONFIG_SMP
+ adeos_lock_cpu(flags);
+      s = __test_and_set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+ adeos_unlock_cpu(flags);
+#else /* !CONFIG_SMP */
+ s = test_and_set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+#endif /* CONFIG_SMP */
+
+ return s;
+ }
+
+ adeos_hw_local_irq_save(flags);
+
+ return !adeos_hw_test_iflag(flags);
+}
+
+void fastcall __adeos_restore_root (unsigned long flags)
+
+{
+ if (flags)
+ __adeos_stall_root();
+ else
+ __adeos_unstall_root();
+}
+
+/* adeos_unstall_pipeline_from() -- Unstall the interrupt pipeline and
+ synchronize pending events from a given domain. */
+
+void fastcall adeos_unstall_pipeline_from (adomain_t *adp)
+
+{
+ adomain_t *this_domain;
+ struct list_head *pos;
+ unsigned long flags;
+ adeos_declare_cpuid;
+
+ adeos_lock_cpu(flags);
+
+ __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+ this_domain = adp_cpu_current[cpuid];
+
+ if (adp == this_domain)
+ {
+ if (adp->cpudata[cpuid].irq_pending_hi != 0)
+ __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+
+ goto release_cpu_and_exit;
+ }
+
+ /* Attempt to flush all events that might be pending at the
+ unstalled domain level. This code is roughly lifted from
+ __adeos_walk_pipeline(). */
+
+ list_for_each(pos,&__adeos_pipeline) {
+
+ adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
+
+ if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
+ break; /* Stalled stage -- do not go further. */
+
+ if (next_domain->cpudata[cpuid].irq_pending_hi != 0)
+ {
+ /* Since the critical IPI might be triggered by the
+ following actions, the current domain might not be
+ linked to the pipeline anymore after its handler
+ returns on SMP boxen, even if the domain remains valid
+ (see adeos_unregister_domain()), so don't make any
+ hazardous assumptions here. */
+
+ if (next_domain == this_domain)
+ __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+ else
+ {
+ __adeos_switch_to(this_domain,next_domain,cpuid);
+
+ adeos_load_cpuid(); /* Processor might have changed. */
+
+	      if (this_domain->cpudata[cpuid].irq_pending_hi != 0 &&
+		  !test_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status) &&
+		  !test_bit(IPIPE_SYNC_FLAG,&this_domain->cpudata[cpuid].status))
+		  __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+ }
+
+ break;
+ }
+ else if (next_domain == this_domain)
+ break;
+ }
+
+release_cpu_and_exit:
+
+ if (__adeos_pipeline_head_p(adp))
+ adeos_hw_sti();
+ else
+ adeos_unlock_cpu(flags);
+}
+
+/* adeos_suspend_domain() -- tell the ADEOS layer that the current
+ domain is now dormant. The calling domain is switched out, while
+ the next domain with work in progress or pending in the pipeline is
+ switched in. */
+
+#ifdef CONFIG_ADEOS_THREADS
+
+#define __flush_pipeline_stage() \
+do { \
+ if (!test_bit(IPIPE_STALL_FLAG,&cpudata->status) && \
+ cpudata->irq_pending_hi != 0) \
+ { \
+ __adeos_sync_stage(IPIPE_IRQMASK_ANY); \
+ adeos_load_cpuid(); \
+ cpudata = &this_domain->cpudata[cpuid]; \
+ } \
+} while(0)
+
+void adeos_suspend_domain (void)
+
+{
+ adomain_t *this_domain, *next_domain;
+ struct adcpudata *cpudata;
+ struct list_head *ln;
+ unsigned long flags;
+ adeos_declare_cpuid;
+
+ adeos_lock_cpu(flags);
+
+ this_domain = next_domain = adp_cpu_current[cpuid];
+ cpudata = &this_domain->cpudata[cpuid];
+
+    /* A suspending domain implicitly unstalls the pipeline. */
+ __clear_bit(IPIPE_STALL_FLAG,&cpudata->status);
+
+ /* Make sure that no event remains stuck in the pipeline. This
+ could happen with emerging SMP instances, or domains which
+ forget to unstall their stage before calling us. */
+ __flush_pipeline_stage();
+
+ for (;;)
+ {
+ ln = next_domain->p_link.next;
+
+ if (ln == &__adeos_pipeline) /* End of pipeline reached? */
+ /* Caller should loop on its idle task on return. */
+ goto release_cpu_and_exit;
+
+ next_domain = list_entry(ln,adomain_t,p_link);
+
+ /* Make sure the domain was preempted (i.e. not sleeping) or
+ has some event to process before switching to it. */
+
+ if (__adeos_domain_work_p(next_domain,cpuid))
+ break;
+ }
+
+    /* Mark the outgoing domain as asleep (i.e. not preempted). */
+ __set_bit(IPIPE_SLEEP_FLAG,&cpudata->status);
+
+ /* Suspend the calling domain, switching to the next one. */
+ __adeos_switch_to(this_domain,next_domain,cpuid);
+
+#ifdef CONFIG_SMP
+ adeos_load_cpuid(); /* Processor might have changed. */
+ cpudata = &this_domain->cpudata[cpuid];
+#endif /* CONFIG_SMP */
+
+ /* Clear the sleep bit for the incoming domain. */
+ __clear_bit(IPIPE_SLEEP_FLAG,&cpudata->status);
+
+ /* Now, we are back into the calling domain. Flush the interrupt
+ log and fire the event interposition handler if needed. CPU
+ migration is allowed in SMP-mode on behalf of an event handler
+ provided that the current domain raised it. Otherwise, it's
+ not. */
+
+ __flush_pipeline_stage();
+
+ if (__test_and_clear_bit(IPIPE_XPEND_FLAG,&cpudata->status))
+ {
+ adeos_unlock_cpu(flags);
+	this_domain->events[cpudata->event_info.event].handler(&cpudata->event_info);
+ return;
+ }
+
+release_cpu_and_exit:
+
+ adeos_unlock_cpu(flags);
+
+ /* Return to the point of suspension in the calling domain. */
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+void adeos_suspend_domain (void)
+
+{
+ adomain_t *this_domain, *next_domain;
+ struct list_head *ln;
+ unsigned long flags;
+ adeos_declare_cpuid;
+
+ adeos_lock_cpu(flags);
+
+ this_domain = next_domain = adp_cpu_current[cpuid];
+
+ __clear_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status);
+
+ if (this_domain->cpudata[cpuid].irq_pending_hi != 0)
+ goto sync_stage;
+
+ for (;;)
+ {
+ ln = next_domain->p_link.next;
+
+ if (ln == &__adeos_pipeline)
+ break;
+
+ next_domain = list_entry(ln,adomain_t,p_link);
+
+ if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
+ break;
+
+ if (next_domain->cpudata[cpuid].irq_pending_hi == 0)
+ continue;
+
+ adp_cpu_current[cpuid] = next_domain;
+
+ if (next_domain->dswitch)
+ next_domain->dswitch();
+
+ sync_stage:
+
+ __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+
+ adeos_load_cpuid(); /* Processor might have changed. */
+
+ if (adp_cpu_current[cpuid] != next_domain)
+ /* Something has changed the current domain under our feet
+ recycling the register set; take note. */
+ this_domain = adp_cpu_current[cpuid];
+ }
+
+ adp_cpu_current[cpuid] = this_domain;
+
+ adeos_unlock_cpu(flags);
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+/* adeos_alloc_irq() -- Allocate a virtual/soft pipelined interrupt.
+   Virtual interrupts are handled in exactly the same way as their
+ hw-generated counterparts. This is a very basic, one-way only,
+ inter-domain communication system (see adeos_trigger_irq()). Note:
+ it is not necessary for a domain to allocate a virtual interrupt to
+ trap it using adeos_virtualize_irq(). The newly allocated VIRQ
+ number which can be passed to other IRQ-related services is
+ returned on success, zero otherwise (i.e. no more virtual interrupt
+ channel is available). We need this service as part of the Adeos
+ bootstrap code, hence it must reside in a built-in area. */
+
+unsigned adeos_alloc_irq (void)
+
+{
+ unsigned long flags, irq = 0;
+ int ipos;
+
+ adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+ if (__adeos_virtual_irq_map != ~0)
+ {
+ ipos = ffz(__adeos_virtual_irq_map);
+ set_bit(ipos,&__adeos_virtual_irq_map);
+ irq = ipos + IPIPE_VIRQ_BASE;
+ }
+
+ adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+ return irq;
+}
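+
+/* Usage sketch (hypothetical handler name): a one-way notification
+   channel between domains.
+
+       unsigned virq = adeos_alloc_irq();
+       adeos_virtualize_irq(virq,&my_virq_handler,NULL,IPIPE_HANDLE_MASK);
+       ...
+       adeos_trigger_irq(virq); (from the producing domain)
+*/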
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/proc_fs.h>
+
+static struct proc_dir_entry *adeos_proc_entry;
+
+static int __adeos_read_proc (char *page,
+ char **start,
+ off_t off,
+ int count,
+ int *eof,
+ void *data)
+{
+ unsigned long ctlbits;
+ struct list_head *pos;
+ unsigned irq, _irq;
+ char *p = page;
+ int len;
+
+#ifdef CONFIG_ADEOS_MODULE
+ p += sprintf(p,"Adeos %s -- Pipelining:
%s",ADEOS_VERSION_STRING,adp_pipelined ? "active" : "stopped");
+#else /* !CONFIG_ADEOS_MODULE */
+ p += sprintf(p,"Adeos %s -- Pipelining: permanent",ADEOS_VERSION_STRING);
+#endif /* CONFIG_ADEOS_MODULE */
+#ifdef CONFIG_ADEOS_THREADS
+ p += sprintf(p, " (threaded)\n\n");
+#else /* CONFIG_ADEOS_THREADS */
+ p += sprintf(p, "\n\n");
+#endif /* CONFIG_ADEOS_THREADS */
+
+ spin_lock(&__adeos_pipelock);
+
+ list_for_each(pos,&__adeos_pipeline) {
+
+ adomain_t *adp = list_entry(pos,adomain_t,p_link);
+
+ p += sprintf(p,"%8s: priority=%d, id=0x%.8x, ptdkeys=%d/%d\n",
+ adp->name,
+ adp->priority,
+ adp->domid,
+ adp->ptd_keycount,
+ adp->ptd_keymax);
+ irq = 0;
+
+ while (irq < IPIPE_NR_IRQS)
+ {
+ ctlbits = (adp->irqs[irq].control & (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_STICKY_MASK));
+
+ if (irq >= IPIPE_NR_XIRQS && !adeos_virtual_irq_p(irq))
+ {
+ /* There might be a hole between the last external IRQ
+ and the first virtual one; skip it. */
+ irq++;
+ continue;
+ }
+
+ if (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map))
+ {
+ /* Non-allocated virtual IRQ; skip it. */
+ irq++;
+ continue;
+ }
+
+ /* Attempt to group consecutive IRQ numbers having the
+ same virtualization settings in a single line. */
+
+ _irq = irq;
+
+ while (++_irq < IPIPE_NR_IRQS)
+ {
+ if (adeos_virtual_irq_p(_irq) != adeos_virtual_irq_p(irq) ||
+ (adeos_virtual_irq_p(_irq) &&
+ !test_bit(_irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)) ||
+ ctlbits != (adp->irqs[_irq].control & (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_STICKY_MASK)))
+ break;
+ }
+
+ if (_irq == irq + 1)
+ p += sprintf(p,"\tirq%u: ",irq);
+ else
+ p += sprintf(p,"\tirq%u-%u: ",irq,_irq - 1);
+
+ /* Statuses are as follows:
+ o "accepted" means handled _and_ passed down the
+ pipeline.
+ o "grabbed" means handled, but the interrupt might be
+ terminated _or_ passed down the pipeline depending on
+ what the domain handler asks for to Adeos.
+ o "passed" means unhandled by the domain but passed
+ down the pipeline.
+ o "discarded" means unhandled and _not_ passed down the
+ pipeline. The interrupt merely disappears from the
+ current domain down to the end of the pipeline. */
+
+ if (ctlbits & IPIPE_HANDLE_MASK)
+ {
+ if (ctlbits & IPIPE_PASS_MASK)
+ p += sprintf(p,"accepted");
+ else
+ p += sprintf(p,"grabbed");
+ }
+ else if (ctlbits & IPIPE_PASS_MASK)
+ p += sprintf(p,"passed");
+ else
+ p += sprintf(p,"discarded");
+
+ if (ctlbits & IPIPE_STICKY_MASK)
+ p += sprintf(p,", sticky");
+
+ if (adeos_virtual_irq_p(irq))
+ p += sprintf(p,", virtual");
+
+ p += sprintf(p,"\n");
+
+ irq = _irq;
+ }
+ }
+
+ spin_unlock(&__adeos_pipelock);
+
+ len = p - page;
+
+ if (len <= off + count)
+ *eof = 1;
+
+ *start = page + off;
+
+ len -= off;
+
+ if (len > count)
+ len = count;
+
+ if (len < 0)
+ len = 0;
+
+ return len;
+}
+
+void __adeos_init_proc (void) {
+
+ adeos_proc_entry = create_proc_read_entry("adeos",
+ 0444,
+ NULL,
+ &__adeos_read_proc,
+ NULL);
+}
+
+#endif /* CONFIG_PROC_FS */
+
+void __adeos_dump_state (void)
+
+{
+ int _cpuid, nr_cpus = num_online_cpus();
+ struct list_head *pos;
+ unsigned long flags;
+ adeos_declare_cpuid;
+
+ adeos_lock_cpu(flags);
+
+ printk(KERN_WARNING "Adeos: Current domain=%s on CPU #%d [stackbase=%p]\n",
+ adp_current->name,
+ cpuid,
+#ifdef CONFIG_ADEOS_THREADS
+ (void *)adp_current->estackbase[cpuid]
+#else /* !CONFIG_ADEOS_THREADS */
+ current
+#endif /* CONFIG_ADEOS_THREADS */
+ );
+
+ list_for_each(pos,&__adeos_pipeline) {
+
+ adomain_t *adp = list_entry(pos,adomain_t,p_link);
+
+ for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
+ printk(KERN_WARNING "%8s[cpuid=%d]: priority=%d, status=0x%lx,
pending_hi=0x%lx\n",
+ adp->name,
+ _cpuid,
+ adp->priority,
+ adp->cpudata[_cpuid].status,
+ adp->cpudata[_cpuid].irq_pending_hi);
+ }
+
+ adeos_unlock_cpu(flags);
+}
+
+EXPORT_SYMBOL(adeos_suspend_domain);
+EXPORT_SYMBOL(adeos_alloc_irq);
+EXPORT_SYMBOL(adp_cpu_current);
+EXPORT_SYMBOL(adp_root);
+EXPORT_SYMBOL(adp_pipelined);
+EXPORT_SYMBOL(__adeos_handle_event);
+EXPORT_SYMBOL(__adeos_unstall_root);
+EXPORT_SYMBOL(__adeos_stall_root);
+EXPORT_SYMBOL(__adeos_restore_root);
+EXPORT_SYMBOL(__adeos_test_and_stall_root);
+EXPORT_SYMBOL(__adeos_test_root);
+EXPORT_SYMBOL(__adeos_dump_state);
+EXPORT_SYMBOL(__adeos_pipeline);
+EXPORT_SYMBOL(__adeos_pipelock);
+EXPORT_SYMBOL(__adeos_virtual_irq_map);
+EXPORT_SYMBOL(__adeos_event_monitors);
+EXPORT_SYMBOL(adeos_unstall_pipeline_from);
+#ifdef CONFIG_ADEOS_PROFILING
+EXPORT_SYMBOL(__adeos_profile_data);
+#endif /* CONFIG_ADEOS_PROFILING */
+/* The following are convenience exports which are needed by some
+ Adeos domains loaded as kernel modules. */
+EXPORT_SYMBOL(do_exit);
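
A quick smoke test for the VIRQ services above -- a sketch only, which
assumes the stock Adeos module API (adeos_virtualize_irq() and
adeos_trigger_irq()) and the control bits printed by
__adeos_read_proc(); check adeos.h for the exact signatures before
relying on it:

    #include <linux/module.h>
    #include <adeos.h>

    static unsigned virq;

    static void virq_handler (unsigned irq)
    {
	/* Runs when the VIRQ is synched in the virtualizing domain. */
	printk("got virq %u\n", irq);
    }

    static int __init virq_demo_init (void)
    {
	virq = adeos_alloc_irq();	/* zero means no free VIRQ channel */

	if (virq == 0)
	    return -EBUSY;

	/* Handle the VIRQ here and also pass it down the pipeline;
	   signature assumed from the Adeos docs. */
	adeos_virtualize_irq(virq, &virq_handler, NULL,
			     IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);

	adeos_trigger_irq(virq);	/* one-way inter-domain kick */
	return 0;			/* cleanup path omitted */
    }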
diff -Nru linux-2.6.10/kernel/exit.c linux-2.6.10-adeos-ppc64-devel/kernel/exit.c
--- linux-2.6.10/kernel/exit.c 2004-12-24 23:35:27.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/kernel/exit.c 2005-05-12 13:36:51.000000000 +0300
@@ -809,6 +809,9 @@
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead)
acct_process(code);
+#ifdef CONFIG_ADEOS_CORE
+ __adeos_exit_process(tsk);
+#endif /* CONFIG_ADEOS_CORE */
__exit_mm(tsk);
exit_sem(tsk);
diff -Nru linux-2.6.10/kernel/fork.c linux-2.6.10-adeos-ppc64-devel/kernel/fork.c
--- linux-2.6.10/kernel/fork.c 2004-12-24 23:33:59.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/kernel/fork.c 2005-05-12 13:36:51.000000000 +0300
@@ -1021,6 +1021,14 @@
nr_threads++;
write_unlock_irq(&tasklist_lock);
+#ifdef CONFIG_ADEOS_CORE
+ {
+ int k;
+
+ for (k = 0; k < ADEOS_ROOT_NPTDKEYS; k++)
+ p->ptd[k] = NULL;
+ }
+#endif /* CONFIG_ADEOS_CORE */
retval = 0;
fork_out:
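
The fork.c hunk above just guarantees that the per-task data (ptd)
slots start out NULL in every new task. For illustration, the consumer
side of those slots looks roughly like this -- assuming the ptd key
API declared in adeos.h; 'my_cookie' and 'value' are made-up names:

    int key;
    void *value;

    key = adeos_alloc_ptdkey();	/* reserve one of the ADEOS_ROOT_NPTDKEYS slots */
    adeos_set_ptd(key, my_cookie);	/* attach per-task data to 'current' */
    value = adeos_get_ptd(key);	/* read it back later; NULL until set */
    adeos_free_ptdkey(key);	/* release the slot */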
diff -Nru linux-2.6.10/kernel/Makefile linux-2.6.10-adeos-ppc64-devel/kernel/Makefile
--- linux-2.6.10/kernel/Makefile 2004-12-24 23:34:26.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/kernel/Makefile 2005-05-12 13:36:51.000000000 +0300
@@ -9,6 +9,7 @@
rcupdate.o intermodule.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o
+obj-$(CONFIG_ADEOS_CORE) += adeos.o
obj-$(CONFIG_FUTEX) += futex.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
diff -Nru linux-2.6.10/kernel/panic.c linux-2.6.10-adeos-ppc64-devel/kernel/panic.c
--- linux-2.6.10/kernel/panic.c 2004-12-24 23:35:29.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/kernel/panic.c 2005-05-12 13:36:51.000000000 +0300
@@ -70,6 +70,9 @@
va_end(args);
printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
bust_spinlocks(0);
+#ifdef CONFIG_ADEOS_CORE
+ __adeos_dump_state();
+#endif /* CONFIG_ADEOS_CORE */
#ifdef CONFIG_SMP
smp_send_stop();
diff -Nru linux-2.6.10/kernel/printk.c linux-2.6.10-adeos-ppc64-devel/kernel/printk.c
--- linux-2.6.10/kernel/printk.c 2004-12-24 23:35:40.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/kernel/printk.c 2005-05-12 13:36:51.000000000 +0300
@@ -34,6 +34,17 @@
#include <asm/uaccess.h>
+#ifdef CONFIG_ADEOS_CORE
+#undef spin_lock_irq
+#define spin_lock_irq(lock)			adeos_spin_lock_disable(lock)
+#undef spin_unlock_irq
+#define spin_unlock_irq(lock)			adeos_spin_unlock_enable(lock)
+#undef spin_lock_irqsave
+#define spin_lock_irqsave(lock, flags)		adeos_spin_lock_irqsave(lock,flags)
+#undef spin_unlock_irqrestore
+#define spin_unlock_irqrestore(lock, flags)	adeos_spin_unlock_irqrestore(lock,flags)
+#endif /* CONFIG_ADEOS_CORE */
+
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
/* printk's without a loglevel use this.. */
@@ -556,6 +567,23 @@
log_level_unknown = 1;
}
+#ifdef CONFIG_ADEOS_CORE
+ if (adp_current != adp_root && !test_bit(ADEOS_SPRINTK_FLAG,&adp_current->flags)) {
+ /* When operating in asynchronous printk() mode, ensure the
+ console drivers and the klogd wakeup are only run by Linux,
+ delegating the actual output to the root domain by means of
+ a virtual IRQ kicking our sync handler. If the current
+ domain has a lower priority than Linux, we will be preempted
+ by it immediately. In synchronous printk() mode, call the
+ console drivers right away. */
+ spin_unlock_irqrestore(&logbuf_lock, flags);
+
+ if (!test_and_set_bit(ADEOS_PPRINTK_FLAG,&adp_root->flags))
+ adeos_trigger_irq(__adeos_printk_virq);
+
+ goto out;
+ }
+#endif /* CONFIG_ADEOS_CORE */
if (!cpu_online(smp_processor_id()) &&
system_state != SYSTEM_RUNNING) {
/*
@@ -567,7 +595,11 @@
spin_unlock_irqrestore(&logbuf_lock, flags);
goto out;
}
- if (!down_trylock(&console_sem)) {
+#ifdef CONFIG_ADEOS_CORE
+ if (adp_current != adp_root || !down_trylock(&console_sem)) {
+#else /* !CONFIG_ADEOS_CORE */
+ if (!down_trylock(&console_sem)) {
+#endif /* CONFIG_ADEOS_CORE */
console_locked = 1;
/*
* We own the drivers. We can drop the spinlock and let
@@ -647,13 +679,47 @@
}
console_locked = 0;
console_may_schedule = 0;
+#ifdef CONFIG_ADEOS_CORE
+ if (adp_root != adp_current) {
+ spin_unlock_irqrestore(&logbuf_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&logbuf_lock, flags);
+ up(&console_sem);
+#else /* !CONFIG_ADEOS_CORE */
up(&console_sem);
spin_unlock_irqrestore(&logbuf_lock, flags);
+#endif /* CONFIG_ADEOS_CORE */
if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait))
wake_up_interruptible(&log_wait);
}
EXPORT_SYMBOL(release_console_sem);
+#ifdef CONFIG_ADEOS_CORE
+void __adeos_sync_console (unsigned virq) {
+
+ /* This handler always runs on behalf of the root (Linux) domain. */
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&logbuf_lock, flags);
+
+ /* Not absolutely atomic w.r.t. the triggering point, but this is
+ harmless; we merely use a cheap trick here to reduce the number
+ of useless triggers. */
+
+ clear_bit(ADEOS_PPRINTK_FLAG,&adp_root->flags);
+
+ if (cpu_online(smp_processor_id()) && system_state == SYSTEM_RUNNING && !down_trylock(&console_sem)) {
+ console_locked = 1;
+ spin_unlock_irqrestore(&logbuf_lock, flags);
+ console_may_schedule = 0;
+ release_console_sem();
+ } else
+ spin_unlock_irqrestore(&logbuf_lock, flags);
+}
+#endif /* CONFIG_ADEOS_CORE */
+
/** console_conditional_schedule - yield the CPU if required
*
* If the console code is currently allowed to sleep, and
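
The printk() rework above boils down to a flag-gated deferral: a
domain other than Linux only stores the message into the log buffer,
sets a pending bit and kicks a VIRQ; the root domain later drains the
console from __adeos_sync_console(). Stripped of the locking, the
pattern is (all names are the ones used in the hunks above):

    /* writer side -- may run over any domain */
    if (!test_and_set_bit(ADEOS_PPRINTK_FLAG, &adp_root->flags))
	adeos_trigger_irq(__adeos_printk_virq);	/* one kick per batch */

    /* reader side -- the VIRQ handler, always run by the Linux domain */
    clear_bit(ADEOS_PPRINTK_FLAG, &adp_root->flags);
    if (!down_trylock(&console_sem)) {
	console_locked = 1;
	release_console_sem();	/* flush the accumulated log */
    }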
diff -Nru linux-2.6.10/kernel/sched.c linux-2.6.10-adeos-ppc64-devel/kernel/sched.c
--- linux-2.6.10/kernel/sched.c 2004-12-24 23:35:24.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/kernel/sched.c 2005-06-01 15:22:05.000000000 +0300
@@ -302,7 +302,16 @@
* Default context-switch locking:
*/
#ifndef prepare_arch_switch
+#ifdef CONFIG_ADEOS_CORE
+#define prepare_arch_switch(rq,prev,next) \
+do { \
+ struct { struct task_struct *prev, *next; } arg = { (prev), (next) }; \
+ __adeos_schedule_head(&arg); \
+ adeos_hw_cli(); \
+} while(0)
+#else /* !CONFIG_ADEOS_CORE */
# define prepare_arch_switch(rq, next) do { } while (0)
+#endif /* CONFIG_ADEOS_CORE */
# define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
# define task_running(rq, p) ((rq)->curr == (p))
#endif
@@ -1367,6 +1376,9 @@
if (current->set_child_tid)
put_user(current->pid, current->set_child_tid);
+#ifdef CONFIG_ADEOS_CORE
+ __adeos_enter_process();
+#endif /* CONFIG_ADEOS_CORE */
}
/*
@@ -2535,6 +2547,11 @@
unsigned long run_time;
int cpu, idx;
+#ifdef CONFIG_ADEOS_CORE
+ if (adp_current != adp_root) /* Let's be helpful and conservative. */
+ return;
+#endif /* CONFIG_ADEOS_CORE */
+
/*
* Test if we are atomic. Since do_exit() needs to call into
* schedule() atomically, we ignore that path for now.
@@ -2684,9 +2701,25 @@
rq->curr = next;
++*switch_count;
- prepare_arch_switch(rq, next);
+#ifdef CONFIG_ADEOS_CORE
+ prepare_arch_switch(rq, prev, next);
+#else /* !CONFIG_ADEOS_CORE */
+ prepare_arch_switch(rq, next);
+#endif /* CONFIG_ADEOS_CORE */
prev = context_switch(rq, prev, next);
barrier();
+#ifdef CONFIG_ADEOS_CORE
+ if (adp_pipelined)
+ {
+ __clear_bit(IPIPE_SYNC_FLAG,&adp_root->cpudata[task_cpu(current)].status);
+ adeos_hw_sti();
+ }
+
+ if (__adeos_schedule_tail(prev) > 0 || adp_current != adp_root)
+ /* Some event handler asked for a truncated
+ scheduling tail. Just obey. */
+ return;
+#endif /* CONFIG_ADEOS_CORE */
finish_task_switch(prev);
} else
@@ -3148,6 +3181,16 @@
retval = security_task_setscheduler(p, policy, &lp);
if (retval)
goto out_unlock;
+#ifdef CONFIG_ADEOS_CORE
+ {
+ struct { struct task_struct *task; int policy; struct sched_param *param; } evdata = { p, policy, &lp };
+ if (__adeos_renice_process(&evdata))
+ {
+ retval = 0;
+ goto out_unlock;
+ }
+ }
+#endif /* CONFIG_ADEOS_CORE */
/*
* To be able to change p->policy safely, the apropriate
* runqueue lock must be held.
@@ -4676,3 +4719,51 @@
}
#endif /* CONFIG_MAGIC_SYSRQ */
+
+#ifdef CONFIG_ADEOS_CORE
+
+void __adeos_setscheduler_root (struct task_struct *p, int policy, int prio)
+{
+ prio_array_t *array;
+ unsigned long flags;
+ runqueue_t *rq;
+
+ read_lock_irq(&tasklist_lock);
+ rq = task_rq_lock(p, &flags);
+ array = p->array;
+ if (array)
+ deactivate_task(p, task_rq(p));
+ __setscheduler(p, policy, prio);
+ if (array) {
+ __activate_task(p, task_rq(p));
+ resched_task(rq->curr);
+ }
+ task_rq_unlock(rq, &flags);
+ read_unlock_irq(&tasklist_lock);
+}
+
+EXPORT_SYMBOL(__adeos_setscheduler_root);
+
+void __adeos_reenter_root (struct task_struct *prev,
+ int policy,
+ int prio)
+{
+ finish_task_switch(prev);
+ if (reacquire_kernel_lock(current) < 0)
+ ;
+ preempt_enable_no_resched();
+
+ if (current->policy != policy || current->rt_priority != prio)
+ __adeos_setscheduler_root(current,policy,prio);
+}
+
+EXPORT_SYMBOL(__adeos_reenter_root);
+
+void __adeos_schedule_back_root (struct task_struct *prev)
+{
+ __adeos_reenter_root(prev,current->policy,current->rt_priority);
+}
+
+EXPORT_SYMBOL(__adeos_schedule_back_root);
+
+#endif /* CONFIG_ADEOS_CORE */
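
All the kernel hooks in this patch follow the same convention: build a
small struct on the stack, pass its address to the matching
__adeos_*_process() notifier, and treat a non-zero return as a veto
from some domain's event handler. The domain side typically looks like
the sketch below -- adeos_catch_event(), adeos_propagate_event(),
adevinfo_t and the ADEOS_RENICE_PROCESS constant are assumed from the
Adeos event API, so verify against adeos.h:

    static void renice_handler (adevinfo_t *evinfo)
    {
	struct { struct task_struct *task; int policy;
		 struct sched_param *param; } *evdata = (void *)evinfo->evdata;

	/* Inspect evdata here. Propagating lets Linux apply the
	   change; returning without propagating makes the hook in
	   setscheduler() report success and skip the runqueues. */
	adeos_propagate_event(evinfo);
    }

    /* from the domain's init code: */
    adeos_catch_event(ADEOS_RENICE_PROCESS, &renice_handler);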
diff -Nru linux-2.6.10/kernel/signal.c linux-2.6.10-adeos-ppc64-devel/kernel/signal.c
--- linux-2.6.10/kernel/signal.c 2004-12-24 23:34:32.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/kernel/signal.c 2005-05-12 13:36:51.000000000 +0300
@@ -576,6 +576,13 @@
set_tsk_thread_flag(t, TIF_SIGPENDING);
+#ifdef CONFIG_ADEOS_CORE
+ {
+ struct { struct task_struct *t; } evdata = { t };
+ __adeos_kick_process(&evdata);
+ }
+#endif /* CONFIG_ADEOS_CORE */
+
/*
* If resume is set, we want to wake it up in the TASK_STOPPED case.
* We don't check for TASK_STOPPED because there is a race with it
@@ -823,6 +830,17 @@
BUG();
#endif
+#ifdef CONFIG_ADEOS_CORE
+ /* If some domain handler in the pipeline does not ask for
+ propagation, return success, pretending that 'sig' was
+ delivered. */
+ {
+ struct { struct task_struct *task; int sig; } evdata = { t, sig };
+ if (__adeos_signal_process(&evdata))
+ goto out;
+ }
+#endif /* CONFIG_ADEOS_CORE */
+
if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
/*
* Set up a return to indicate that we dropped the signal.
diff -Nru linux-2.6.10/kernel/sysctl.c linux-2.6.10-adeos-ppc64-devel/kernel/sysctl.c
--- linux-2.6.10/kernel/sysctl.c 2004-12-24 23:33:59.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/kernel/sysctl.c 2005-05-12 13:36:51.000000000 +0300
@@ -946,6 +946,9 @@
#ifdef CONFIG_PROC_FS
register_proc_table(root_table, proc_sys_root);
init_irq_proc();
+#ifdef CONFIG_ADEOS_CORE
+ __adeos_init_proc();
+#endif /* CONFIG_ADEOS_CORE */
#endif
}
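
With the proc hook registered above, /proc/adeos summarizes each
domain in the pipeline. Going by the sprintf() calls in
__adeos_read_proc(), the output has roughly this shape (domain name,
priorities and IRQ ranges invented for illustration):

    Adeos <version> -- Pipelining: permanent (threaded)

        Linux: priority=100, id=0x01010101, ptdkeys=0/4
	    irq0-255: accepted
	    irq256: grabbed, sticky, virtual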
diff -Nru linux-2.6.10/Makefile linux-2.6.10-adeos-ppc64-devel/Makefile
--- linux-2.6.10/Makefile 2004-12-24 23:35:01.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-devel/Makefile 2005-05-12 13:50:24.000000000 +0300
@@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 10
-EXTRAVERSION =
-NAME=Woozy Numbat
+EXTRAVERSION = -adeos-ppc64
+NAME=Sleeping Beauty
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -558,6 +558,8 @@
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ mm/ fs/ ipc/ security/ crypto/
+core-$(CONFIG_ADEOS) += adeos/
+
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
$(net-y) $(net-m) $(libs-y) $(libs-m)))