Author: maks
Date: Sun Mar 16 09:26:39 2008
New Revision: 10901

Log:
update to 2.6.25-rc5-git6
no conflicts Added: dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.25-rc5-git6 - copied, changed from r10900, /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.25-rc5-git4 Removed: dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.25-rc5-git4 Modified: dists/trunk/linux-2.6/debian/patches/series/1~experimental.1 Copied: dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.25-rc5-git6 (from r10900, /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.25-rc5-git4) ============================================================================== --- /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.25-rc5-git4 (original) +++ dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.25-rc5-git6 Sun Mar 16 09:26:39 2008 @@ -24,6 +24,77 @@ power/ - directory with info on Linux PCI power management. powerpc/ +diff --git a/Documentation/acpi/dsdt-override.txt b/Documentation/acpi/dsdt-override.txt +index 5008f25..febbb1b 100644 +--- a/Documentation/acpi/dsdt-override.txt ++++ b/Documentation/acpi/dsdt-override.txt +@@ -1,15 +1,7 @@ +-Linux supports two methods of overriding the BIOS DSDT: ++Linux supports a method of overriding the BIOS DSDT: + + CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel. + +-CONFIG_ACPI_CUSTOM_DSDT_INITRD adds the image to the initrd. +- +-When to use these methods is described in detail on the ++When to use this method is described in detail on the + Linux/ACPI home page: + http://www.lesswatts.org/projects/acpi/overridingDSDT.php +- +-Note that if both options are used, the DSDT supplied +-by the INITRD method takes precedence. +- +-Documentation/initramfs-add-dsdt.sh is provided for convenience +-for use with the CONFIG_ACPI_CUSTOM_DSDT_INITRD method. +diff --git a/Documentation/acpi/initramfs-add-dsdt.sh b/Documentation/acpi/initramfs-add-dsdt.sh +deleted file mode 100755 +index 17ef6e8..0000000 +--- a/Documentation/acpi/initramfs-add-dsdt.sh ++++ /dev/null +@@ -1,43 +0,0 @@ +-#!/bin/bash +-# Adds a DSDT file to the initrd (if it's an initramfs) +-# first argument is the name of archive +-# second argument is the name of the file to add +-# The file will be copied as /DSDT.aml +- +-# 20060126: fix "Premature end of file" with some old cpio (Roland Robic) +-# 20060205: this time it should really work +- +-# check the arguments +-if [ $# -ne 2 ]; then +- program_name=$(basename $0) +- echo "\ +-$program_name: too few arguments +-Usage: $program_name initrd-name.img DSDT-to-add.aml +-Adds a DSDT file to an initrd (in initramfs format) +- +- initrd-name.img: filename of the initrd in initramfs format +- DSDT-to-add.aml: filename of the DSDT file to add +- " 1>&2 +- exit 1 +-fi +- +-# we should check it's an initramfs +- +-tempcpio=$(mktemp -d) +-# cleanup on exit, hangup, interrupt, quit, termination +-trap 'rm -rf $tempcpio' 0 1 2 3 15 +- +-# extract the archive +-gunzip -c "$1" > "$tempcpio"/initramfs.cpio || exit 1 +- +-# copy the DSDT file at the root of the directory so that we can call it "/DSDT.aml" +-cp -f "$2" "$tempcpio"/DSDT.aml +- +-# add the file +-cd "$tempcpio" +-(echo DSDT.aml | cpio --quiet -H newc -o -A -O "$tempcpio"/initramfs.cpio) || exit 1 +-cd "$OLDPWD" +- +-# re-compress the archive +-gzip -c "$tempcpio"/initramfs.cpio > "$1" +- diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 5681e2f..518ebe6 100644 --- a/Documentation/filesystems/proc.txt @@ -45,7 +116,7 @@ swap_token_timeout ------------------ diff --git a/Documentation/kernel-parameters.txt 
b/Documentation/kernel-parameters.txt -index 533e67f..49318b9 100644 +index 533e67f..622f784 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -138,7 +138,7 @@ and is between 256 and 4096 characters. It is defined in the file @@ -57,6 +128,16 @@ acpi_apic_instance= [ACPI, IOAPIC] Format: <int> +@@ -177,9 +177,6 @@ and is between 256 and 4096 characters. It is defined in the file + + acpi_no_auto_ssdt [HW,ACPI] Disable automatic loading of SSDT + +- acpi_no_initrd_override [KNL,ACPI] +- Disable loading custom ACPI tables from the initramfs +- + acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS + Format: To spoof as Windows 98: ="Microsoft Windows" + diff --git a/Documentation/laptop-mode.txt b/Documentation/laptop-mode.txt deleted file mode 100644 index eeedee1..0000000 @@ -12097,10 +12178,10 @@ } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig -index f688c21..fbcaa06 100644 +index f688c21..b4f5e85 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig -@@ -283,24 +283,23 @@ config ACPI_TOSHIBA +@@ -283,34 +283,22 @@ config ACPI_TOSHIBA If you have a legacy free Toshiba laptop (such as the Libretto L1 series), say Y. @@ -12125,15 +12206,23 @@ Enter the full path name to the file which includes the AmlCode declaration. +-config ACPI_CUSTOM_DSDT_INITRD +- bool "Read Custom DSDT from initramfs" +- depends on BLK_DEV_INITRD +- default n +- help +- This option supports a custom DSDT by optionally loading it from initrd. +- See Documentation/acpi/dsdt-override.txt + If unsure, don't enter a file name. -+ + +- If you are not using this feature now, but may use it later, +- it is safe to say Y here. +config ACPI_CUSTOM_DSDT + bool + default ACPI_CUSTOM_DSDT_FILE != "" -+ - config ACPI_CUSTOM_DSDT_INITRD - bool "Read Custom DSDT from initramfs" - depends on BLK_DEV_INITRD + + config ACPI_BLACKLIST_YEAR + int "Disable ACPI for systems before Jan 1st this year" if X86_32 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index ce3c0a2..5b6760e 100644 --- a/drivers/acpi/bus.c @@ -12216,10 +12305,122 @@ /* This workaround is needed only on some broken machines, * which require early EC, but fail to provide ECDT */ diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c -index 8edba7b..065819b 100644 +index 8edba7b..a697fb6 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c -@@ -1237,7 +1237,7 @@ int acpi_check_resource_conflict(struct resource *res) +@@ -91,10 +91,6 @@ static DEFINE_SPINLOCK(acpi_res_lock); + #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ + static char osi_additional_string[OSI_STRING_LENGTH_MAX]; + +-#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD +-static int acpi_no_initrd_override; +-#endif +- + /* + * "Ode to _OSI(Linux)" + * +@@ -324,67 +320,6 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val, + return AE_OK; + } + +-#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD +-static struct acpi_table_header *acpi_find_dsdt_initrd(void) +-{ +- struct file *firmware_file; +- mm_segment_t oldfs; +- unsigned long len, len2; +- struct acpi_table_header *dsdt_buffer, *ret = NULL; +- struct kstat stat; +- char *ramfs_dsdt_name = "/DSDT.aml"; +- +- printk(KERN_INFO PREFIX "Checking initramfs for custom DSDT\n"); +- +- /* +- * Never do this at home, only the user-space is allowed to open a file. +- * The clean way would be to use the firmware loader. +- * But this code must be run before there is any userspace available. +- * A static/init firmware infrastructure doesn't exist yet... 
+- */ +- if (vfs_stat(ramfs_dsdt_name, &stat) < 0) +- return ret; +- +- len = stat.size; +- /* check especially against empty files */ +- if (len <= 4) { +- printk(KERN_ERR PREFIX "Failed: DSDT only %lu bytes.\n", len); +- return ret; +- } +- +- firmware_file = filp_open(ramfs_dsdt_name, O_RDONLY, 0); +- if (IS_ERR(firmware_file)) { +- printk(KERN_ERR PREFIX "Failed to open %s.\n", ramfs_dsdt_name); +- return ret; +- } +- +- dsdt_buffer = kmalloc(len, GFP_ATOMIC); +- if (!dsdt_buffer) { +- printk(KERN_ERR PREFIX "Failed to allocate %lu bytes.\n", len); +- goto err; +- } +- +- oldfs = get_fs(); +- set_fs(KERNEL_DS); +- len2 = vfs_read(firmware_file, (char __user *)dsdt_buffer, len, +- &firmware_file->f_pos); +- set_fs(oldfs); +- if (len2 < len) { +- printk(KERN_ERR PREFIX "Failed to read %lu bytes from %s.\n", +- len, ramfs_dsdt_name); +- ACPI_FREE(dsdt_buffer); +- goto err; +- } +- +- printk(KERN_INFO PREFIX "Found %lu byte DSDT in %s.\n", +- len, ramfs_dsdt_name); +- ret = dsdt_buffer; +-err: +- filp_close(firmware_file, NULL); +- return ret; +-} +-#endif +- + acpi_status + acpi_os_table_override(struct acpi_table_header * existing_table, + struct acpi_table_header ** new_table) +@@ -398,16 +333,6 @@ acpi_os_table_override(struct acpi_table_header * existing_table, + if (strncmp(existing_table->signature, "DSDT", 4) == 0) + *new_table = (struct acpi_table_header *)AmlCode; + #endif +-#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD +- if ((strncmp(existing_table->signature, "DSDT", 4) == 0) && +- !acpi_no_initrd_override) { +- struct acpi_table_header *initrd_table; +- +- initrd_table = acpi_find_dsdt_initrd(); +- if (initrd_table) +- *new_table = initrd_table; +- } +-#endif + if (*new_table != NULL) { + printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], " + "this is unsafe: tainting kernel\n", +@@ -418,15 +343,6 @@ acpi_os_table_override(struct acpi_table_header * existing_table, + return AE_OK; + } + +-#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD +-static int __init acpi_no_initrd_override_setup(char *s) +-{ +- acpi_no_initrd_override = 1; +- return 1; +-} +-__setup("acpi_no_initrd_override", acpi_no_initrd_override_setup); +-#endif +- + static irqreturn_t acpi_irq(int irq, void *dev_id) + { + u32 handled; +@@ -1237,7 +1153,7 @@ int acpi_check_resource_conflict(struct resource *res) if (clash) { if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { @@ -13950,6 +14151,487 @@ spin_lock_bh(&iop_chan->lock); slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); +diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig +index fe9e768..25bdc2d 100644 +--- a/drivers/firewire/Kconfig ++++ b/drivers/firewire/Kconfig +@@ -1,5 +1,3 @@ +-# -*- shell-script -*- +- + comment "An alternative FireWire stack is available with EXPERIMENTAL=y" + depends on EXPERIMENTAL=n + +@@ -21,27 +19,7 @@ config FIREWIRE + NOTE: + + You should only build ONE of the stacks, unless you REALLY know what +- you are doing. If you install both, you should configure them only as +- modules rather than link them statically, and you should blacklist one +- of the concurrent low-level drivers in /etc/modprobe.conf. Add either +- +- blacklist firewire-ohci +- or +- blacklist ohci1394 +- +- there depending on which driver you DON'T want to have auto-loaded. +- You can optionally do the same with the other IEEE 1394/ FireWire +- drivers. 
+- +- If you have an old modprobe which doesn't implement the blacklist +- directive, use either +- +- install firewire-ohci /bin/true +- or +- install ohci1394 /bin/true +- +- and so on, depending on which modules you DON't want to have +- auto-loaded. ++ you are doing. + + config FIREWIRE_OHCI + tristate "Support for OHCI FireWire host controllers" +@@ -57,8 +35,24 @@ config FIREWIRE_OHCI + + NOTE: + +- If you also build ohci1394 of the classic stack, blacklist either +- ohci1394 or firewire-ohci to let hotplug load only the desired driver. ++ You should only build ohci1394 or firewire-ohci, but not both. ++ If you nevertheless want to install both, you should configure them ++ only as modules and blacklist the driver(s) which you don't want to ++ have auto-loaded. Add either ++ ++ blacklist firewire-ohci ++ or ++ blacklist ohci1394 ++ blacklist video1394 ++ blacklist dv1394 ++ ++ to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf ++ depending on your distribution. The latter two modules should be ++ blacklisted together with ohci1394 because they depend on ohci1394. ++ ++ If you have an old modprobe which doesn't implement the blacklist ++ directive, use "install modulename /bin/true" for the modules to be ++ blacklisted. + + config FIREWIRE_SBP2 + tristate "Support for storage devices (SBP-2 protocol driver)" +@@ -75,9 +69,3 @@ config FIREWIRE_SBP2 + + You should also enable support for disks, CD-ROMs, etc. in the SCSI + configuration section. +- +- NOTE: +- +- If you also build sbp2 of the classic stack, blacklist either sbp2 +- or firewire-sbp2 to let hotplug load only the desired driver. +- +diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c +index 7ebad3c..996d61f 100644 +--- a/drivers/firewire/fw-ohci.c ++++ b/drivers/firewire/fw-ohci.c +@@ -33,6 +33,10 @@ + #include <asm/page.h> + #include <asm/system.h> + ++#ifdef CONFIG_PPC_PMAC ++#include <asm/pmac_feature.h> ++#endif ++ + #include "fw-ohci.h" + #include "fw-transaction.h" + +@@ -175,6 +179,7 @@ struct fw_ohci { + int generation; + int request_generation; + u32 bus_seconds; ++ bool old_uninorth; + + /* + * Spinlock for accessing fw_ohci data. Never call out of +@@ -276,19 +281,13 @@ static int ar_context_add_page(struct ar_context *ctx) + { + struct device *dev = ctx->ohci->card.device; + struct ar_buffer *ab; +- dma_addr_t ab_bus; ++ dma_addr_t uninitialized_var(ab_bus); + size_t offset; + +- ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC); ++ ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC); + if (ab == NULL) + return -ENOMEM; + +- ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL); +- if (dma_mapping_error(ab_bus)) { +- free_page((unsigned long) ab); +- return -ENOMEM; +- } +- + memset(&ab->descriptor, 0, sizeof(ab->descriptor)); + ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | + DESCRIPTOR_STATUS | +@@ -299,8 +298,6 @@ static int ar_context_add_page(struct ar_context *ctx) + ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); + ab->descriptor.branch_address = 0; + +- dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL); +- + ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); + ctx->last_buffer->next = ab; + ctx->last_buffer = ab; +@@ -311,15 +308,22 @@ static int ar_context_add_page(struct ar_context *ctx) + return 0; + } + ++#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) ++#define cond_le32_to_cpu(v) \ ++ (ohci->old_uninorth ? 
(__force __u32)(v) : le32_to_cpu(v)) ++#else ++#define cond_le32_to_cpu(v) le32_to_cpu(v) ++#endif ++ + static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) + { + struct fw_ohci *ohci = ctx->ohci; + struct fw_packet p; + u32 status, length, tcode; + +- p.header[0] = le32_to_cpu(buffer[0]); +- p.header[1] = le32_to_cpu(buffer[1]); +- p.header[2] = le32_to_cpu(buffer[2]); ++ p.header[0] = cond_le32_to_cpu(buffer[0]); ++ p.header[1] = cond_le32_to_cpu(buffer[1]); ++ p.header[2] = cond_le32_to_cpu(buffer[2]); + + tcode = (p.header[0] >> 4) & 0x0f; + switch (tcode) { +@@ -331,7 +335,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) + break; + + case TCODE_READ_BLOCK_REQUEST : +- p.header[3] = le32_to_cpu(buffer[3]); ++ p.header[3] = cond_le32_to_cpu(buffer[3]); + p.header_length = 16; + p.payload_length = 0; + break; +@@ -340,7 +344,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) + case TCODE_READ_BLOCK_RESPONSE: + case TCODE_LOCK_REQUEST: + case TCODE_LOCK_RESPONSE: +- p.header[3] = le32_to_cpu(buffer[3]); ++ p.header[3] = cond_le32_to_cpu(buffer[3]); + p.header_length = 16; + p.payload_length = p.header[3] >> 16; + break; +@@ -357,7 +361,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) + + /* FIXME: What to do about evt_* errors? */ + length = (p.header_length + p.payload_length + 3) / 4; +- status = le32_to_cpu(buffer[length]); ++ status = cond_le32_to_cpu(buffer[length]); + + p.ack = ((status >> 16) & 0x1f) - 16; + p.speed = (status >> 21) & 0x7; +@@ -375,7 +379,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) + */ + + if (p.ack + 16 == 0x09) +- ohci->request_generation = (buffer[2] >> 16) & 0xff; ++ ohci->request_generation = (p.header[2] >> 16) & 0xff; + else if (ctx == &ohci->ar_request_ctx) + fw_core_handle_request(&ohci->card, &p); + else +@@ -397,6 +401,7 @@ static void ar_context_tasklet(unsigned long data) + + if (d->res_count == 0) { + size_t size, rest, offset; ++ dma_addr_t buffer_bus; + + /* + * This descriptor is finished and we may have a +@@ -405,9 +410,7 @@ static void ar_context_tasklet(unsigned long data) + */ + + offset = offsetof(struct ar_buffer, data); +- dma_unmap_single(ohci->card.device, +- le32_to_cpu(ab->descriptor.data_address) - offset, +- PAGE_SIZE, DMA_BIDIRECTIONAL); ++ buffer_bus = le32_to_cpu(ab->descriptor.data_address) - offset; + + buffer = ab; + ab = ab->next; +@@ -423,7 +426,8 @@ static void ar_context_tasklet(unsigned long data) + while (buffer < end) + buffer = handle_ar_packet(ctx, buffer); + +- free_page((unsigned long)buffer); ++ dma_free_coherent(ohci->card.device, PAGE_SIZE, ++ buffer, buffer_bus); + ar_context_add_page(ctx); + } else { + buffer = ctx->pointer; +@@ -532,7 +536,7 @@ static int + context_add_buffer(struct context *ctx) + { + struct descriptor_buffer *desc; +- dma_addr_t bus_addr; ++ dma_addr_t uninitialized_var(bus_addr); + int offset; + + /* +@@ -1022,13 +1026,14 @@ static void bus_reset_tasklet(unsigned long data) + */ + + self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; +- generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; ++ generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; + rmb(); + + for (i = 1, j = 0; j < self_id_count; i += 2, j++) { + if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) + fw_error("inconsistent self IDs\n"); +- ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]); ++ ohci->self_id_buffer[j] = ++ 
cond_le32_to_cpu(ohci->self_id_cpu[i]); + } + rmb(); + +@@ -1316,7 +1321,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) + unsigned long flags; + int retval = -EBUSY; + __be32 *next_config_rom; +- dma_addr_t next_config_rom_bus; ++ dma_addr_t uninitialized_var(next_config_rom_bus); + + ohci = fw_ohci(card); + +@@ -1487,7 +1492,7 @@ static int handle_ir_dualbuffer_packet(struct context *context, + void *p, *end; + int i; + +- if (db->first_res_count > 0 && db->second_res_count > 0) { ++ if (db->first_res_count != 0 && db->second_res_count != 0) { + if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { + /* This descriptor isn't done yet, stop iteration. */ + return 0; +@@ -1513,7 +1518,7 @@ static int handle_ir_dualbuffer_packet(struct context *context, + memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); + i += ctx->base.header_size; + ctx->excess_bytes += +- (le32_to_cpu(*(u32 *)(p + 4)) >> 16) & 0xffff; ++ (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; + p += ctx->base.header_size + 4; + } + ctx->header_length = i; +@@ -2048,6 +2053,18 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) + int err; + size_t size; + ++#ifdef CONFIG_PPC_PMAC ++ /* Necessary on some machines if fw-ohci was loaded/ unloaded before */ ++ if (machine_is(powermac)) { ++ struct device_node *ofn = pci_device_to_OF_node(dev); ++ ++ if (ofn) { ++ pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); ++ pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); ++ } ++ } ++#endif /* CONFIG_PPC_PMAC */ ++ + ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); + if (ohci == NULL) { + fw_error("Could not malloc fw_ohci data.\n"); +@@ -2066,6 +2083,10 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) + pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); + pci_set_drvdata(dev, ohci); + ++#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) ++ ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE && ++ dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW; ++#endif + spin_lock_init(&ohci->lock); + + tasklet_init(&ohci->bus_reset_tasklet, +@@ -2182,6 +2203,19 @@ static void pci_remove(struct pci_dev *dev) + pci_disable_device(dev); + fw_card_put(&ohci->card); + ++#ifdef CONFIG_PPC_PMAC ++ /* On UniNorth, power down the cable and turn off the chip clock ++ * to save power on laptops */ ++ if (machine_is(powermac)) { ++ struct device_node *ofn = pci_device_to_OF_node(dev); ++ ++ if (ofn) { ++ pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); ++ pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); ++ } ++ } ++#endif /* CONFIG_PPC_PMAC */ ++ + fw_notify("Removed fw-ohci device.\n"); + } + +@@ -2202,6 +2236,16 @@ static int pci_suspend(struct pci_dev *pdev, pm_message_t state) + if (err) + fw_error("pci_set_power_state failed with %d\n", err); + ++/* PowerMac suspend code comes last */ ++#ifdef CONFIG_PPC_PMAC ++ if (machine_is(powermac)) { ++ struct device_node *ofn = pci_device_to_OF_node(pdev); ++ ++ if (ofn) ++ pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); ++ } ++#endif /* CONFIG_PPC_PMAC */ ++ + return 0; + } + +@@ -2210,6 +2254,16 @@ static int pci_resume(struct pci_dev *pdev) + struct fw_ohci *ohci = pci_get_drvdata(pdev); + int err; + ++/* PowerMac resume code comes first */ ++#ifdef CONFIG_PPC_PMAC ++ if (machine_is(powermac)) { ++ struct device_node *ofn = pci_device_to_OF_node(pdev); ++ ++ if (ofn) ++ pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); ++ } ++#endif /* CONFIG_PPC_PMAC */ ++ + pci_set_power_state(pdev, PCI_D0); + 
pci_restore_state(pdev); + err = pci_enable_device(pdev); +diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c +index 03069a4..62b4e47 100644 +--- a/drivers/firewire/fw-sbp2.c ++++ b/drivers/firewire/fw-sbp2.c +@@ -173,6 +173,7 @@ struct sbp2_target { + #define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ + #define SBP2_ORB_NULL 0x80000000 + #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 ++#define SBP2_RETRY_LIMIT 0xf /* 15 retries */ + + #define SBP2_DIRECTION_TO_MEDIA 0x0 + #define SBP2_DIRECTION_FROM_MEDIA 0x1 +@@ -330,6 +331,11 @@ static const struct { + .model = ~0, + .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, + }, ++ /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { ++ .firmware_revision = 0x002600, ++ .model = ~0, ++ .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, ++ }, + + /* + * There are iPods (2nd gen, 3rd gen) with model_id == 0, but +@@ -812,6 +818,30 @@ static void sbp2_target_put(struct sbp2_target *tgt) + kref_put(&tgt->kref, sbp2_release_target); + } + ++static void ++complete_set_busy_timeout(struct fw_card *card, int rcode, ++ void *payload, size_t length, void *done) ++{ ++ complete(done); ++} ++ ++static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) ++{ ++ struct fw_device *device = fw_device(lu->tgt->unit->device.parent); ++ DECLARE_COMPLETION_ONSTACK(done); ++ struct fw_transaction t; ++ static __be32 busy_timeout; ++ ++ /* FIXME: we should try to set dual-phase cycle_limit too */ ++ busy_timeout = cpu_to_be32(SBP2_RETRY_LIMIT); ++ ++ fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, ++ lu->tgt->node_id, lu->generation, device->max_speed, ++ CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &busy_timeout, ++ sizeof(busy_timeout), complete_set_busy_timeout, &done); ++ wait_for_completion(&done); ++} ++ + static void sbp2_reconnect(struct work_struct *work); + + static void sbp2_login(struct work_struct *work) +@@ -864,10 +894,8 @@ static void sbp2_login(struct work_struct *work) + fw_notify("%s: logged in to LUN %04x (%d retries)\n", + tgt->bus_id, lu->lun, lu->retries); + +-#if 0 +- /* FIXME: The linux1394 sbp2 does this last step. 
*/ +- sbp2_set_busy_timeout(scsi_id); +-#endif ++ /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ ++ sbp2_set_busy_timeout(lu); + + PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); + sbp2_agent_reset(lu); +diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c +index e47bb04..d2c7a3d 100644 +--- a/drivers/firewire/fw-topology.c ++++ b/drivers/firewire/fw-topology.c +@@ -21,6 +21,7 @@ + #include <linux/module.h> + #include <linux/wait.h> + #include <linux/errno.h> ++#include <asm/bug.h> + #include <asm/system.h> + #include "fw-transaction.h" + #include "fw-topology.h" +@@ -424,8 +425,8 @@ update_tree(struct fw_card *card, struct fw_node *root) + node1 = fw_node(list1.next); + + while (&node0->link != &list0) { ++ WARN_ON(node0->port_count != node1->port_count); + +- /* assert(node0->port_count == node1->port_count); */ + if (node0->link_on && !node1->link_on) + event = FW_NODE_LINK_OFF; + else if (!node0->link_on && node1->link_on) +diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c +index 7fcc59d..99529e5 100644 +--- a/drivers/firewire/fw-transaction.c ++++ b/drivers/firewire/fw-transaction.c +@@ -751,7 +751,7 @@ handle_topology_map(struct fw_card *card, struct fw_request *request, + void *payload, size_t length, void *callback_data) + { + int i, start, end; +- u32 *map; ++ __be32 *map; + + if (!TCODE_IS_READ_REQUEST(tcode)) { + fw_send_response(card, request, RCODE_TYPE_ERROR); +diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h +index 09cb728..a43bb22 100644 +--- a/drivers/firewire/fw-transaction.h ++++ b/drivers/firewire/fw-transaction.h +@@ -86,12 +86,12 @@ + static inline void + fw_memcpy_from_be32(void *_dst, void *_src, size_t size) + { +- u32 *dst = _dst; +- u32 *src = _src; ++ u32 *dst = _dst; ++ __be32 *src = _src; + int i; + + for (i = 0; i < size / 4; i++) +- dst[i] = cpu_to_be32(src[i]); ++ dst[i] = be32_to_cpu(src[i]); + } + + static inline void diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 92583cd..6e72fd3 100644 --- a/drivers/gpio/pca953x.c @@ -14015,6 +14697,22 @@ } static int i2c_device_remove(struct device *dev) +diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c +index 9e2b196..f53f72d 100644 +--- a/drivers/ieee1394/sbp2.c ++++ b/drivers/ieee1394/sbp2.c +@@ -376,6 +376,11 @@ static const struct { + .model_id = SBP2_ROM_VALUE_WILDCARD, + .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, + }, ++ /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { ++ .firmware_revision = 0x002600, ++ .model_id = SBP2_ROM_VALUE_WILDCARD, ++ .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, ++ }, + /* iPod 4th generation */ { + .firmware_revision = 0x0a2700, + .model_id = 0x000021, diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index b10ade9..4df4051 100644 --- a/drivers/infiniband/core/cm.c @@ -16983,6 +17681,19 @@ fm->addr + FM_SET_INTERRUPT_ENABLE); return 0; +diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c +index 20d5c7b..1c14a18 100644 +--- a/drivers/mmc/host/tifm_sd.c ++++ b/drivers/mmc/host/tifm_sd.c +@@ -180,7 +180,7 @@ static void tifm_sd_transfer_data(struct tifm_sd *host) + host->sg_pos++; + if (host->sg_pos == host->sg_len) { + if ((r_data->flags & MMC_DATA_WRITE) +- && DATA_CARRY) ++ && (host->cmd_flags & DATA_CARRY)) + writel(host->bounce_buf_data[0], + host->dev->addr + + SOCK_MMCSD_DATA); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index a0f0e60..fe7b5ec 100644 --- 
a/drivers/net/Kconfig @@ -19928,6 +20639,35 @@ return -EAGAIN; } +diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c +index 1eb771d..3e6b3f4 100644 +--- a/fs/nfsd/nfsfh.c ++++ b/fs/nfsd/nfsfh.c +@@ -232,6 +232,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access) + fhp->fh_dentry = dentry; + fhp->fh_export = exp; + nfsd_nr_verified++; ++ cache_get(&exp->h); + } else { + /* + * just rechecking permissions +@@ -241,6 +242,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access) + dprintk("nfsd: fh_verify - just checking\n"); + dentry = fhp->fh_dentry; + exp = fhp->fh_export; ++ cache_get(&exp->h); + /* + * Set user creds for this exportpoint; necessary even + * in the "just checking" case because this may be a +@@ -252,8 +254,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access) + if (error) + goto out; + } +- cache_get(&exp->h); +- + + error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type); + if (error) diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index ee50c96..b8057c5 100644 --- a/fs/ocfs2/cluster/tcp.c @@ -21182,6 +21922,24 @@ /* This should work for both 32 and 64 bit userland. */ struct ethtool_cmd { +diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h +index 51d2141..adcbb05 100644 +--- a/include/linux/exportfs.h ++++ b/include/linux/exportfs.h +@@ -49,11 +49,11 @@ struct fid { + + /** + * struct export_operations - for nfsd to communicate with file systems +- * @decode_fh: decode a file handle fragment and return a &struct dentry + * @encode_fh: encode a file handle fragment from a dentry ++ * @fh_to_dentry: find the implied object and get a dentry for it ++ * @fh_to_parent: find the implied object's parent and get a dentry for it + * @get_name: find the name for a given inode in a given directory + * @get_parent: find the parent of a given directory +- * @get_dentry: find a dentry for the inode given a file handle sub-fragment + * + * See Documentation/filesystems/Exporting for details on how to use + * this interface correctly. 
diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 33d8f20..4d10c73 100644 --- a/include/linux/firmware.h @@ -21709,6 +22467,51 @@ extern int bt_sysfs_init(void); extern void bt_sysfs_cleanup(void); +diff --git a/include/net/dst.h b/include/net/dst.h +index e3ac7d0..ae13370 100644 +--- a/include/net/dst.h ++++ b/include/net/dst.h +@@ -52,15 +52,10 @@ struct dst_entry + unsigned short header_len; /* more space at head required */ + unsigned short trailer_len; /* space to reserve at tail */ + +- u32 metrics[RTAX_MAX]; +- struct dst_entry *path; +- +- unsigned long rate_last; /* rate limiting for ICMP */ + unsigned int rate_tokens; ++ unsigned long rate_last; /* rate limiting for ICMP */ + +-#ifdef CONFIG_NET_CLS_ROUTE +- __u32 tclassid; +-#endif ++ struct dst_entry *path; + + struct neighbour *neighbour; + struct hh_cache *hh; +@@ -70,10 +65,20 @@ struct dst_entry + int (*output)(struct sk_buff*); + + struct dst_ops *ops; +- +- unsigned long lastuse; ++ ++ u32 metrics[RTAX_MAX]; ++ ++#ifdef CONFIG_NET_CLS_ROUTE ++ __u32 tclassid; ++#endif ++ ++ /* ++ * __refcnt wants to be on a different cache line from ++ * input/output/ops or performance tanks badly ++ */ + atomic_t __refcnt; /* client references */ + int __use; ++ unsigned long lastuse; + union { + struct dst_entry *next; + struct rtable *rt_next; diff --git a/include/net/irda/irttp.h b/include/net/irda/irttp.h index 32c385d..0788c23 100644 --- a/include/net/irda/irttp.h @@ -21804,6 +22607,55 @@ -endchoice + systems. Classic RCU is the default. Note that the + PREEMPT_RCU symbol is used to select/deselect this option. +diff --git a/init/initramfs.c b/init/initramfs.c +index c0b1e05..d53fee8 100644 +--- a/init/initramfs.c ++++ b/init/initramfs.c +@@ -538,7 +538,7 @@ skip: + initrd_end = 0; + } + +-int __init populate_rootfs(void) ++static int __init populate_rootfs(void) + { + char *err = unpack_to_rootfs(__initramfs_start, + __initramfs_end - __initramfs_start, 0); +@@ -577,10 +577,4 @@ int __init populate_rootfs(void) + } + return 0; + } +-#ifndef CONFIG_ACPI_CUSTOM_DSDT_INITRD +-/* +- * if this option is enabled, populate_rootfs() is called _earlier_ in the +- * boot sequence. This insures that the ACPI initialisation can find the file. +- */ + rootfs_initcall(populate_rootfs); +-#endif +diff --git a/init/main.c b/init/main.c +index fbb0167..99ce949 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -102,12 +102,6 @@ static inline void mark_rodata_ro(void) { } + extern void tc_init(void); + #endif + +-#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD +-extern int populate_rootfs(void); +-#else +-static inline void populate_rootfs(void) {} +-#endif +- + enum system_states system_state; + EXPORT_SYMBOL(system_state); + +@@ -650,7 +644,6 @@ asmlinkage void __init start_kernel(void) + + check_bugs(); + +- populate_rootfs(); /* For DSDT override from initramfs */ + acpi_early_init(); /* before LAPIC and SMP init */ + + /* Do the rest non-__init'ed, we're now alive */ diff --git a/ipc/shm.c b/ipc/shm.c index c47e872..cc63fae 100644 --- a/ipc/shm.c @@ -21994,10 +22846,95 @@ } diff --git a/kernel/sched.c b/kernel/sched.c -index b02e4fc..1cb53fb 100644 +index b02e4fc..d1ad69b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c -@@ -5813,13 +5813,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) +@@ -301,7 +301,7 @@ struct cfs_rq { + /* 'curr' points to currently running entity on this cfs_rq. + * It is set to NULL otherwise (i.e when none are currently running). 
+ */ +- struct sched_entity *curr; ++ struct sched_entity *curr, *next; + + unsigned long nr_spread_over; + +@@ -1084,7 +1084,7 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, + u64 tmp; + + if (unlikely(!lw->inv_weight)) +- lw->inv_weight = (WMULT_CONST - lw->weight/2) / lw->weight + 1; ++ lw->inv_weight = (WMULT_CONST-lw->weight/2) / (lw->weight+1); + + tmp = (u64)delta_exec * weight; + /* +@@ -1108,11 +1108,13 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw) + static inline void update_load_add(struct load_weight *lw, unsigned long inc) + { + lw->weight += inc; ++ lw->inv_weight = 0; + } + + static inline void update_load_sub(struct load_weight *lw, unsigned long dec) + { + lw->weight -= dec; ++ lw->inv_weight = 0; + } + + /* +@@ -4268,11 +4270,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio) + oldprio = p->prio; + on_rq = p->se.on_rq; + running = task_current(rq, p); +- if (on_rq) { ++ if (on_rq) + dequeue_task(rq, p, 0); +- if (running) +- p->sched_class->put_prev_task(rq, p); +- } ++ if (running) ++ p->sched_class->put_prev_task(rq, p); + + if (rt_prio(prio)) + p->sched_class = &rt_sched_class; +@@ -4281,10 +4282,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio) + + p->prio = prio; + ++ if (running) ++ p->sched_class->set_curr_task(rq); + if (on_rq) { +- if (running) +- p->sched_class->set_curr_task(rq); +- + enqueue_task(rq, p, 0); + + check_class_changed(rq, p, prev_class, oldprio, running); +@@ -4581,19 +4581,17 @@ recheck: + update_rq_clock(rq); + on_rq = p->se.on_rq; + running = task_current(rq, p); +- if (on_rq) { ++ if (on_rq) + deactivate_task(rq, p, 0); +- if (running) +- p->sched_class->put_prev_task(rq, p); +- } ++ if (running) ++ p->sched_class->put_prev_task(rq, p); + + oldprio = p->prio; + __setscheduler(rq, p, policy, param->sched_priority); + ++ if (running) ++ p->sched_class->set_curr_task(rq); + if (on_rq) { +- if (running) +- p->sched_class->set_curr_task(rq); +- + activate_task(rq, p, 0); + + check_class_changed(rq, p, prev_class, oldprio, running); +@@ -5813,13 +5811,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) /* Must be high prio: stop_machine expects to yield to it. */ rq = task_rq_lock(p, &flags); __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); @@ -22011,7 +22948,7 @@ task_rq_unlock(rq, &flags); cpu_rq(cpu)->migration_thread = p; break; -@@ -5828,6 +5821,15 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) +@@ -5828,6 +5819,15 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_ONLINE_FROZEN: /* Strictly unnecessary, as first user will wake it. 
*/ wake_up_process(cpu_rq(cpu)->migration_thread); @@ -22027,7 +22964,7 @@ break; #ifdef CONFIG_HOTPLUG_CPU -@@ -5879,7 +5881,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) +@@ -5879,7 +5879,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) spin_unlock_irq(&rq->lock); break; @@ -22037,7 +22974,7 @@ /* Update our root-domain */ rq = cpu_rq(cpu); spin_lock_irqsave(&rq->lock, flags); -@@ -6103,6 +6106,8 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) +@@ -6103,6 +6104,8 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) rq->rd = rd; cpu_set(rq->cpu, rd->span); @@ -22046,6 +22983,196 @@ for (class = sched_class_highest; class; class = class->next) { if (class->join_domain) +@@ -7613,11 +7616,10 @@ void sched_move_task(struct task_struct *tsk) + running = task_current(rq, tsk); + on_rq = tsk->se.on_rq; + +- if (on_rq) { ++ if (on_rq) + dequeue_task(rq, tsk, 0); +- if (unlikely(running)) +- tsk->sched_class->put_prev_task(rq, tsk); +- } ++ if (unlikely(running)) ++ tsk->sched_class->put_prev_task(rq, tsk); + + set_task_rq(tsk, task_cpu(tsk)); + +@@ -7626,11 +7628,10 @@ void sched_move_task(struct task_struct *tsk) + tsk->sched_class->moved_group(tsk); + #endif + +- if (on_rq) { +- if (unlikely(running)) +- tsk->sched_class->set_curr_task(rq); ++ if (unlikely(running)) ++ tsk->sched_class->set_curr_task(rq); ++ if (on_rq) + enqueue_task(rq, tsk, 0); +- } + + task_rq_unlock(rq, &flags); + } +diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c +index e2a5305..f2cc590 100644 +--- a/kernel/sched_fair.c ++++ b/kernel/sched_fair.c +@@ -175,8 +175,15 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) + * Maintain a cache of leftmost tree entries (it is frequently + * used): + */ +- if (leftmost) ++ if (leftmost) { + cfs_rq->rb_leftmost = &se->run_node; ++ /* ++ * maintain cfs_rq->min_vruntime to be a monotonic increasing ++ * value tracking the leftmost vruntime in the tree. 
++ */ ++ cfs_rq->min_vruntime = ++ max_vruntime(cfs_rq->min_vruntime, se->vruntime); ++ } + + rb_link_node(&se->run_node, parent, link); + rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); +@@ -184,8 +191,24 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) + + static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) + { +- if (cfs_rq->rb_leftmost == &se->run_node) +- cfs_rq->rb_leftmost = rb_next(&se->run_node); ++ if (cfs_rq->rb_leftmost == &se->run_node) { ++ struct rb_node *next_node; ++ struct sched_entity *next; ++ ++ next_node = rb_next(&se->run_node); ++ cfs_rq->rb_leftmost = next_node; ++ ++ if (next_node) { ++ next = rb_entry(next_node, ++ struct sched_entity, run_node); ++ cfs_rq->min_vruntime = ++ max_vruntime(cfs_rq->min_vruntime, ++ next->vruntime); ++ } ++ } ++ ++ if (cfs_rq->next == se) ++ cfs_rq->next = NULL; + + rb_erase(&se->run_node, &cfs_rq->tasks_timeline); + } +@@ -260,12 +283,8 @@ static u64 __sched_period(unsigned long nr_running) + */ + static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) + { +- u64 slice = __sched_period(cfs_rq->nr_running); +- +- slice *= se->load.weight; +- do_div(slice, cfs_rq->load.weight); +- +- return slice; ++ return calc_delta_mine(__sched_period(cfs_rq->nr_running), ++ se->load.weight, &cfs_rq->load); + } + + /* +@@ -303,7 +322,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, + unsigned long delta_exec) + { + unsigned long delta_exec_weighted; +- u64 vruntime; + + schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max)); + +@@ -315,19 +333,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, + &curr->load); + } + curr->vruntime += delta_exec_weighted; +- +- /* +- * maintain cfs_rq->min_vruntime to be a monotonic increasing +- * value tracking the leftmost vruntime in the tree. +- */ +- if (first_fair(cfs_rq)) { +- vruntime = min_vruntime(curr->vruntime, +- __pick_next_entity(cfs_rq)->vruntime); +- } else +- vruntime = curr->vruntime; +- +- cfs_rq->min_vruntime = +- max_vruntime(cfs_rq->min_vruntime, vruntime); + } + + static void update_curr(struct cfs_rq *cfs_rq) +@@ -493,7 +498,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) + { + u64 vruntime; + +- vruntime = cfs_rq->min_vruntime; ++ if (first_fair(cfs_rq)) { ++ vruntime = min_vruntime(cfs_rq->min_vruntime, ++ __pick_next_entity(cfs_rq)->vruntime); ++ } else ++ vruntime = cfs_rq->min_vruntime; + + if (sched_feat(TREE_AVG)) { + struct sched_entity *last = __pick_last_entity(cfs_rq); +@@ -515,8 +524,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) + + if (!initial) { + /* sleeps upto a single latency don't count. */ +- if (sched_feat(NEW_FAIR_SLEEPERS)) +- vruntime -= sysctl_sched_latency; ++ if (sched_feat(NEW_FAIR_SLEEPERS)) { ++ vruntime -= calc_delta_fair(sysctl_sched_latency, ++ &cfs_rq->load); ++ } + + /* ensure we never gain time by being placed backwards. 
*/ + vruntime = max_vruntime(se->vruntime, vruntime); +@@ -616,12 +627,32 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) + se->prev_sum_exec_runtime = se->sum_exec_runtime; + } + ++static struct sched_entity * ++pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se) ++{ ++ s64 diff, gran; ++ ++ if (!cfs_rq->next) ++ return se; ++ ++ diff = cfs_rq->next->vruntime - se->vruntime; ++ if (diff < 0) ++ return se; ++ ++ gran = calc_delta_fair(sysctl_sched_wakeup_granularity, &cfs_rq->load); ++ if (diff > gran) ++ return se; ++ ++ return cfs_rq->next; ++} ++ + static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) + { + struct sched_entity *se = NULL; + + if (first_fair(cfs_rq)) { + se = __pick_next_entity(cfs_rq); ++ se = pick_next(cfs_rq, se); + set_next_entity(cfs_rq, se); + } + +@@ -1060,6 +1091,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) + resched_task(curr); + return; + } ++ ++ cfs_rq_of(pse)->next = pse; ++ + /* + * Batch tasks do not preempt (their preemption is driven by + * the tick): diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 4bb5a11..0259228 100644 --- a/lib/swiotlb.c Modified: dists/trunk/linux-2.6/debian/patches/series/1~experimental.1 ============================================================================== --- dists/trunk/linux-2.6/debian/patches/series/1~experimental.1 (original) +++ dists/trunk/linux-2.6/debian/patches/series/1~experimental.1 Sun Mar 16 09:26:39 2008 @@ -1,4 +1,4 @@ -+ bugfix/all/patch-2.6.25-rc5-git4 ++ bugfix/all/patch-2.6.25-rc5-git6 + debian/version.patch + debian/kernelvariables.patch + debian/doc-build-parallel.patch _______________________________________________ Kernel-svn-changes mailing list Kernel-svn-changes@lists.alioth.debian.org http://lists.alioth.debian.org/mailman/listinfo/kernel-svn-changes