commit:     b3ebb4c67c366cb01773ce5833866b9b489a483f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 16 23:27:12 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 16 23:27:12 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b3ebb4c6
proj/linux-patches: Linux patch 4.4.171 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org> 0000_README | 4 + 1170_linux-4.4.171.patch | 2650 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 2654 insertions(+) diff --git a/0000_README b/0000_README index ff5384e..fb7be63 100644 --- a/0000_README +++ b/0000_README @@ -723,6 +723,10 @@ Patch: 1169_linux-4.4.170.patch From: http://www.kernel.org Desc: Linux 4.4.170 +Patch: 1170_linux-4.4.171.patch +From: http://www.kernel.org +Desc: Linux 4.4.171 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1170_linux-4.4.171.patch b/1170_linux-4.4.171.patch new file mode 100644 index 0000000..4061abc --- /dev/null +++ b/1170_linux-4.4.171.patch @@ -0,0 +1,2650 @@ +diff --git a/Makefile b/Makefile +index bc58f206c0da..c6b680faedd8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 170 ++SUBLEVEL = 171 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/crypto/cts.c b/crypto/cts.c +index e467ec0acf9f..e65688d6a4ca 100644 +--- a/crypto/cts.c ++++ b/crypto/cts.c +@@ -137,8 +137,8 @@ static int crypto_cts_encrypt(struct blkcipher_desc *desc, + lcldesc.info = desc->info; + lcldesc.flags = desc->flags; + +- if (tot_blocks == 1) { +- err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize); ++ if (tot_blocks <= 1) { ++ err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, nbytes); + } else if (nbytes <= bsize * 2) { + err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes); + } else { +@@ -232,8 +232,8 @@ static int crypto_cts_decrypt(struct blkcipher_desc *desc, + lcldesc.info = desc->info; + lcldesc.flags = desc->flags; + +- if (tot_blocks == 1) { +- err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize); ++ if (tot_blocks <= 1) { ++ err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, nbytes); + } else if (nbytes <= bsize * 2) { + err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes); + } else { +diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c +index 1c2b846c5776..f28b4949cb9d 100644 +--- a/drivers/acpi/power.c ++++ b/drivers/acpi/power.c +@@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list) + } + } + ++static bool acpi_power_resource_is_dup(union acpi_object *package, ++ unsigned int start, unsigned int i) ++{ ++ acpi_handle rhandle, dup; ++ unsigned int j; ++ ++ /* The caller is expected to check the package element types */ ++ rhandle = package->package.elements[i].reference.handle; ++ for (j = start; j < i; j++) { ++ dup = package->package.elements[j].reference.handle; ++ if (dup == rhandle) ++ return true; ++ } ++ ++ return false; ++} ++ + int acpi_extract_power_resources(union acpi_object *package, unsigned int start, + struct list_head *list) + { +@@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, + err = -ENODEV; + break; + } ++ ++ /* Some ACPI tables contain duplicate power resource references */ ++ if (acpi_power_resource_is_dup(package, start, i)) ++ continue; ++ + err = acpi_add_power_resource(rhandle); + if (err) + break; +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index 94c837046786..57e3790c87b1 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -459,9 +459,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + return i2cdev_ioctl_smbus(client, arg); + + case I2C_RETRIES: ++ if (arg > INT_MAX) ++ return 
-EINVAL; ++ + client->adapter->retries = arg; + break; + case I2C_TIMEOUT: ++ if (arg > INT_MAX) ++ return -EINVAL; ++ + /* For historical reasons, user-space sets the timeout + * value in units of 10 ms. + */ +diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c +index 99da549d5d06..0118287a8a10 100644 +--- a/drivers/pci/host/pcie-altera.c ++++ b/drivers/pci/host/pcie-altera.c +@@ -40,8 +40,10 @@ + #define P2A_INT_ENABLE 0x3070 + #define P2A_INT_ENA_ALL 0xf + #define RP_LTSSM 0x3c64 ++#define RP_LTSSM_MASK 0x1f + #define LTSSM_L0 0xf + ++#define PCIE_CAP_OFFSET 0x80 + /* TLP configuration type 0 and 1 */ + #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ + #define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ +@@ -60,6 +62,9 @@ + #define TLP_LOOP 500 + #define RP_DEVFN 0 + ++#define LINK_UP_TIMEOUT HZ ++#define LINK_RETRAIN_TIMEOUT HZ ++ + #define INTX_NUM 4 + + #define DWORD_MASK 3 +@@ -80,25 +85,21 @@ struct tlp_rp_regpair_t { + u32 reg1; + }; + +-static void altera_pcie_retrain(struct pci_dev *dev) ++static inline void cra_writel(struct altera_pcie *pcie, const u32 value, ++ const u32 reg) + { +- u16 linkcap, linkstat; +- +- /* +- * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but +- * current speed is 2.5 GB/s. +- */ +- pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap); ++ writel_relaxed(value, pcie->cra_base + reg); ++} + +- if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) +- return; ++static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) ++{ ++ return readl_relaxed(pcie->cra_base + reg); ++} + +- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat); +- if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) +- pcie_capability_set_word(dev, PCI_EXP_LNKCTL, +- PCI_EXP_LNKCTL_RL); ++static bool altera_pcie_link_is_up(struct altera_pcie *pcie) ++{ ++ return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); + } +-DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain); + + /* + * Altera PCIe port uses BAR0 of RC's configuration space as the translation +@@ -119,17 +120,6 @@ static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, + return false; + } + +-static inline void cra_writel(struct altera_pcie *pcie, const u32 value, +- const u32 reg) +-{ +- writel_relaxed(value, pcie->cra_base + reg); +-} +- +-static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) +-{ +- return readl_relaxed(pcie->cra_base + reg); +-} +- + static void tlp_write_tx(struct altera_pcie *pcie, + struct tlp_rp_regpair_t *tlp_rp_regdata) + { +@@ -138,11 +128,6 @@ static void tlp_write_tx(struct altera_pcie *pcie, + cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); + } + +-static bool altera_pcie_link_is_up(struct altera_pcie *pcie) +-{ +- return !!(cra_readl(pcie, RP_LTSSM) & LTSSM_L0); +-} +- + static bool altera_pcie_valid_config(struct altera_pcie *pcie, + struct pci_bus *bus, int dev) + { +@@ -286,22 +271,14 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, + return PCIBIOS_SUCCESSFUL; + } + +-static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, +- int where, int size, u32 *value) ++static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, ++ unsigned int devfn, int where, int size, ++ u32 *value) + { +- struct altera_pcie *pcie = bus->sysdata; + int ret; + u32 data; + u8 byte_en; + +- if (altera_pcie_hide_rc_bar(bus, devfn, where)) +- return PCIBIOS_BAD_REGISTER_NUMBER; +- +- if 
(!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) { +- *value = 0xffffffff; +- return PCIBIOS_DEVICE_NOT_FOUND; +- } +- + switch (size) { + case 1: + byte_en = 1 << (where & 3); +@@ -314,7 +291,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, + break; + } + +- ret = tlp_cfg_dword_read(pcie, bus->number, devfn, ++ ret = tlp_cfg_dword_read(pcie, busno, devfn, + (where & ~DWORD_MASK), byte_en, &data); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; +@@ -334,20 +311,14 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, + return PCIBIOS_SUCCESSFUL; + } + +-static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, +- int where, int size, u32 value) ++static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, ++ unsigned int devfn, int where, int size, ++ u32 value) + { +- struct altera_pcie *pcie = bus->sysdata; + u32 data32; + u32 shift = 8 * (where & 3); + u8 byte_en; + +- if (altera_pcie_hide_rc_bar(bus, devfn, where)) +- return PCIBIOS_BAD_REGISTER_NUMBER; +- +- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) +- return PCIBIOS_DEVICE_NOT_FOUND; +- + switch (size) { + case 1: + data32 = (value & 0xff) << shift; +@@ -363,8 +334,40 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, + break; + } + +- return tlp_cfg_dword_write(pcie, bus->number, devfn, +- (where & ~DWORD_MASK), byte_en, data32); ++ return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK), ++ byte_en, data32); ++} ++ ++static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *value) ++{ ++ struct altera_pcie *pcie = bus->sysdata; ++ ++ if (altera_pcie_hide_rc_bar(bus, devfn, where)) ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ ++ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) { ++ *value = 0xffffffff; ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ } ++ ++ return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size, ++ value); ++} ++ ++static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 value) ++{ ++ struct altera_pcie *pcie = bus->sysdata; ++ ++ if (altera_pcie_hide_rc_bar(bus, devfn, where)) ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ ++ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size, ++ value); + } + + static struct pci_ops altera_pcie_ops = { +@@ -372,6 +375,90 @@ static struct pci_ops altera_pcie_ops = { + .write = altera_pcie_cfg_write, + }; + ++static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno, ++ unsigned int devfn, int offset, u16 *value) ++{ ++ u32 data; ++ int ret; ++ ++ ret = _altera_pcie_cfg_read(pcie, busno, devfn, ++ PCIE_CAP_OFFSET + offset, sizeof(*value), ++ &data); ++ *value = data; ++ return ret; ++} ++ ++static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno, ++ unsigned int devfn, int offset, u16 value) ++{ ++ return _altera_pcie_cfg_write(pcie, busno, devfn, ++ PCIE_CAP_OFFSET + offset, sizeof(value), ++ value); ++} ++ ++static void altera_wait_link_retrain(struct altera_pcie *pcie) ++{ ++ u16 reg16; ++ unsigned long start_jiffies; ++ ++ /* Wait for link training end. 
*/ ++ start_jiffies = jiffies; ++ for (;;) { ++ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, ++ PCI_EXP_LNKSTA, ®16); ++ if (!(reg16 & PCI_EXP_LNKSTA_LT)) ++ break; ++ ++ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) { ++ dev_err(&pcie->pdev->dev, "link retrain timeout\n"); ++ break; ++ } ++ udelay(100); ++ } ++ ++ /* Wait for link is up */ ++ start_jiffies = jiffies; ++ for (;;) { ++ if (altera_pcie_link_is_up(pcie)) ++ break; ++ ++ if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { ++ dev_err(&pcie->pdev->dev, "link up timeout\n"); ++ break; ++ } ++ udelay(100); ++ } ++} ++ ++static void altera_pcie_retrain(struct altera_pcie *pcie) ++{ ++ u16 linkcap, linkstat, linkctl; ++ ++ if (!altera_pcie_link_is_up(pcie)) ++ return; ++ ++ /* ++ * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but ++ * current speed is 2.5 GB/s. ++ */ ++ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP, ++ &linkcap); ++ if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) ++ return; ++ ++ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA, ++ &linkstat); ++ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { ++ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, ++ PCI_EXP_LNKCTL, &linkctl); ++ linkctl |= PCI_EXP_LNKCTL_RL; ++ altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, ++ PCI_EXP_LNKCTL, linkctl); ++ ++ altera_wait_link_retrain(pcie); ++ } ++} ++ + static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) + { +@@ -506,6 +593,11 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie) + return 0; + } + ++static void altera_pcie_host_init(struct altera_pcie *pcie) ++{ ++ altera_pcie_retrain(pcie); ++} ++ + static int altera_pcie_probe(struct platform_device *pdev) + { + struct altera_pcie *pcie; +@@ -543,6 +635,7 @@ static int altera_pcie_probe(struct platform_device *pdev) + cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); + /* enable all interrupts */ + cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); ++ altera_pcie_host_init(pcie); + + bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops, + pcie, &pcie->resources); +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 3919ea066bf9..736de1021d8b 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1885,6 +1885,13 @@ static const struct usb_device_id acm_ids[] = { + .driver_info = IGNORE_DEVICE, + }, + ++ { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */ ++ .driver_info = SEND_ZERO_PACKET, ++ }, ++ { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */ ++ .driver_info = SEND_ZERO_PACKET, ++ }, ++ + /* control interfaces without any protocol set */ + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, + USB_CDC_PROTO_NONE) }, +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index cf378b1ed373..733479ddf8a7 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -240,7 +240,8 @@ static const struct usb_device_id usb_quirk_list[] = { + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + + /* Corsair K70 RGB */ +- { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, ++ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT | ++ USB_QUIRK_DELAY_CTRL_MSG }, + + /* Corsair Strafe */ + { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | +diff --git a/drivers/usb/storage/scsiglue.c 
b/drivers/usb/storage/scsiglue.c +index 6c186b4df94a..b3344a77dcce 100644 +--- a/drivers/usb/storage/scsiglue.c ++++ b/drivers/usb/storage/scsiglue.c +@@ -223,8 +223,12 @@ static int slave_configure(struct scsi_device *sdev) + if (!(us->fflags & US_FL_NEEDS_CAP16)) + sdev->try_rc_10_first = 1; + +- /* assume SPC3 or latter devices support sense size > 18 */ +- if (sdev->scsi_level > SCSI_SPC_2) ++ /* ++ * assume SPC3 or latter devices support sense size > 18 ++ * unless US_FL_BAD_SENSE quirk is specified. ++ */ ++ if (sdev->scsi_level > SCSI_SPC_2 && ++ !(us->fflags & US_FL_BAD_SENSE)) + us->fflags |= US_FL_SANE_SENSE; + + /* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h +index 898215cad351..d92b974f0635 100644 +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -1392,6 +1392,18 @@ UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SANE_SENSE), + ++/* ++ * Reported by Icenowy Zheng <icen...@aosc.io> ++ * The SMI SM3350 USB-UFS bridge controller will enter a wrong state ++ * that do not process read/write command if a long sense is requested, ++ * so force to use 18-byte sense. ++ */ ++UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff, ++ "SMI", ++ "SM3350 UFS-to-USB-Mass-Storage bridge", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_BAD_SENSE ), ++ + /* + * Pete Zaitcev <zait...@yahoo.com>, bz#164688. + * The device blatantly ignores LUN and returns 1 in GetMaxLUN. +diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile +index 6d1d0b93b1aa..c792df826e12 100644 +--- a/fs/btrfs/Makefile ++++ b/fs/btrfs/Makefile +@@ -9,7 +9,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ + export.o tree-log.o free-space-cache.o zlib.o lzo.o \ + compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \ + reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \ +- uuid-tree.o props.o hash.o ++ uuid-tree.o props.o hash.o tree-checker.o + + btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o + btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index 38ee08675468..8f4baa3cb992 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -1726,20 +1726,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, + return err; + } + +-/* +- * The leaf data grows from end-to-front in the node. +- * this returns the address of the start of the last item, +- * which is the stop of the leaf data stack +- */ +-static inline unsigned int leaf_data_end(struct btrfs_root *root, +- struct extent_buffer *leaf) +-{ +- u32 nr = btrfs_header_nritems(leaf); +- if (nr == 0) +- return BTRFS_LEAF_DATA_SIZE(root); +- return btrfs_item_offset_nr(leaf, nr - 1); +-} +- + + /* + * search for key in the extent_buffer. 
The items start at offset p, +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index e847573c6db0..4a91d3119e59 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -35,6 +35,7 @@ + #include <linux/btrfs.h> + #include <linux/workqueue.h> + #include <linux/security.h> ++#include <linux/sizes.h> + #include "extent_io.h" + #include "extent_map.h" + #include "async-thread.h" +@@ -897,6 +898,7 @@ struct btrfs_balance_item { + #define BTRFS_FILE_EXTENT_INLINE 0 + #define BTRFS_FILE_EXTENT_REG 1 + #define BTRFS_FILE_EXTENT_PREALLOC 2 ++#define BTRFS_FILE_EXTENT_TYPES 2 + + struct btrfs_file_extent_item { + /* +@@ -2283,7 +2285,7 @@ do { \ + #define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) + + struct btrfs_map_token { +- struct extent_buffer *eb; ++ const struct extent_buffer *eb; + char *kaddr; + unsigned long offset; + }; +@@ -2314,18 +2316,19 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token) + sizeof(((type *)0)->member))) + + #define DECLARE_BTRFS_SETGET_BITS(bits) \ +-u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ +- unsigned long off, \ +- struct btrfs_map_token *token); \ +-void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \ ++u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ ++ const void *ptr, unsigned long off, \ ++ struct btrfs_map_token *token); \ ++void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \ + unsigned long off, u##bits val, \ + struct btrfs_map_token *token); \ +-static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \ ++static inline u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ ++ const void *ptr, \ + unsigned long off) \ + { \ + return btrfs_get_token_##bits(eb, ptr, off, NULL); \ + } \ +-static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ ++static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr,\ + unsigned long off, u##bits val) \ + { \ + btrfs_set_token_##bits(eb, ptr, off, val, NULL); \ +@@ -2337,7 +2340,8 @@ DECLARE_BTRFS_SETGET_BITS(32) + DECLARE_BTRFS_SETGET_BITS(64) + + #define BTRFS_SETGET_FUNCS(name, type, member, bits) \ +-static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \ ++static inline u##bits btrfs_##name(const struct extent_buffer *eb, \ ++ const type *s) \ + { \ + BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ + return btrfs_get_##bits(eb, s, offsetof(type, member)); \ +@@ -2348,7 +2352,8 @@ static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \ + BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ + btrfs_set_##bits(eb, s, offsetof(type, member), val); \ + } \ +-static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \ ++static inline u##bits btrfs_token_##name(const struct extent_buffer *eb,\ ++ const type *s, \ + struct btrfs_map_token *token) \ + { \ + BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ +@@ -2363,9 +2368,9 @@ static inline void btrfs_set_token_##name(struct extent_buffer *eb, \ + } + + #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ +-static inline u##bits btrfs_##name(struct extent_buffer *eb) \ ++static inline u##bits btrfs_##name(const struct extent_buffer *eb) \ + { \ +- type *p = page_address(eb->pages[0]); \ ++ const type *p = page_address(eb->pages[0]); \ + u##bits res = le##bits##_to_cpu(p->member); \ + return res; \ + } \ +@@ -2377,7 +2382,7 @@ static inline void btrfs_set_##name(struct extent_buffer *eb, \ + } + + #define 
BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ +-static inline u##bits btrfs_##name(type *s) \ ++static inline u##bits btrfs_##name(const type *s) \ + { \ + return le##bits##_to_cpu(s->member); \ + } \ +@@ -2678,7 +2683,7 @@ static inline unsigned long btrfs_node_key_ptr_offset(int nr) + sizeof(struct btrfs_key_ptr) * nr; + } + +-void btrfs_node_key(struct extent_buffer *eb, ++void btrfs_node_key(const struct extent_buffer *eb, + struct btrfs_disk_key *disk_key, int nr); + + static inline void btrfs_set_node_key(struct extent_buffer *eb, +@@ -2707,28 +2712,28 @@ static inline struct btrfs_item *btrfs_item_nr(int nr) + return (struct btrfs_item *)btrfs_item_nr_offset(nr); + } + +-static inline u32 btrfs_item_end(struct extent_buffer *eb, ++static inline u32 btrfs_item_end(const struct extent_buffer *eb, + struct btrfs_item *item) + { + return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); + } + +-static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) ++static inline u32 btrfs_item_end_nr(const struct extent_buffer *eb, int nr) + { + return btrfs_item_end(eb, btrfs_item_nr(nr)); + } + +-static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) ++static inline u32 btrfs_item_offset_nr(const struct extent_buffer *eb, int nr) + { + return btrfs_item_offset(eb, btrfs_item_nr(nr)); + } + +-static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) ++static inline u32 btrfs_item_size_nr(const struct extent_buffer *eb, int nr) + { + return btrfs_item_size(eb, btrfs_item_nr(nr)); + } + +-static inline void btrfs_item_key(struct extent_buffer *eb, ++static inline void btrfs_item_key(const struct extent_buffer *eb, + struct btrfs_disk_key *disk_key, int nr) + { + struct btrfs_item *item = btrfs_item_nr(nr); +@@ -2764,8 +2769,8 @@ BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, + BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, + transid, 64); + +-static inline void btrfs_dir_item_key(struct extent_buffer *eb, +- struct btrfs_dir_item *item, ++static inline void btrfs_dir_item_key(const struct extent_buffer *eb, ++ const struct btrfs_dir_item *item, + struct btrfs_disk_key *key) + { + read_eb_member(eb, item, struct btrfs_dir_item, location, key); +@@ -2773,7 +2778,7 @@ static inline void btrfs_dir_item_key(struct extent_buffer *eb, + + static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, + struct btrfs_dir_item *item, +- struct btrfs_disk_key *key) ++ const struct btrfs_disk_key *key) + { + write_eb_member(eb, item, struct btrfs_dir_item, location, key); + } +@@ -2785,8 +2790,8 @@ BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, + BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, + generation, 64); + +-static inline void btrfs_free_space_key(struct extent_buffer *eb, +- struct btrfs_free_space_header *h, ++static inline void btrfs_free_space_key(const struct extent_buffer *eb, ++ const struct btrfs_free_space_header *h, + struct btrfs_disk_key *key) + { + read_eb_member(eb, h, struct btrfs_free_space_header, location, key); +@@ -2794,7 +2799,7 @@ static inline void btrfs_free_space_key(struct extent_buffer *eb, + + static inline void btrfs_set_free_space_key(struct extent_buffer *eb, + struct btrfs_free_space_header *h, +- struct btrfs_disk_key *key) ++ const struct btrfs_disk_key *key) + { + write_eb_member(eb, h, struct btrfs_free_space_header, location, key); + } +@@ -2821,25 +2826,25 @@ static inline void btrfs_cpu_key_to_disk(struct 
btrfs_disk_key *disk, + disk->objectid = cpu_to_le64(cpu->objectid); + } + +-static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, +- struct btrfs_key *key, int nr) ++static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb, ++ struct btrfs_key *key, int nr) + { + struct btrfs_disk_key disk_key; + btrfs_node_key(eb, &disk_key, nr); + btrfs_disk_key_to_cpu(key, &disk_key); + } + +-static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, +- struct btrfs_key *key, int nr) ++static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb, ++ struct btrfs_key *key, int nr) + { + struct btrfs_disk_key disk_key; + btrfs_item_key(eb, &disk_key, nr); + btrfs_disk_key_to_cpu(key, &disk_key); + } + +-static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, +- struct btrfs_dir_item *item, +- struct btrfs_key *key) ++static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb, ++ const struct btrfs_dir_item *item, ++ struct btrfs_key *key) + { + struct btrfs_disk_key disk_key; + btrfs_dir_item_key(eb, item, &disk_key); +@@ -2872,7 +2877,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, + nritems, 32); + BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64); + +-static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) ++static inline int btrfs_header_flag(const struct extent_buffer *eb, u64 flag) + { + return (btrfs_header_flags(eb) & flag) == flag; + } +@@ -2891,7 +2896,7 @@ static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) + return (flags & flag) == flag; + } + +-static inline int btrfs_header_backref_rev(struct extent_buffer *eb) ++static inline int btrfs_header_backref_rev(const struct extent_buffer *eb) + { + u64 flags = btrfs_header_flags(eb); + return flags >> BTRFS_BACKREF_REV_SHIFT; +@@ -2911,12 +2916,12 @@ static inline unsigned long btrfs_header_fsid(void) + return offsetof(struct btrfs_header, fsid); + } + +-static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) ++static inline unsigned long btrfs_header_chunk_tree_uuid(const struct extent_buffer *eb) + { + return offsetof(struct btrfs_header, chunk_tree_uuid); + } + +-static inline int btrfs_is_leaf(struct extent_buffer *eb) ++static inline int btrfs_is_leaf(const struct extent_buffer *eb) + { + return btrfs_header_level(eb) == 0; + } +@@ -2950,12 +2955,12 @@ BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, + BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, + rtransid, 64); + +-static inline bool btrfs_root_readonly(struct btrfs_root *root) ++static inline bool btrfs_root_readonly(const struct btrfs_root *root) + { + return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; + } + +-static inline bool btrfs_root_dead(struct btrfs_root *root) ++static inline bool btrfs_root_dead(const struct btrfs_root *root) + { + return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; + } +@@ -3012,51 +3017,51 @@ BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, + /* struct btrfs_balance_item */ + BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); + +-static inline void btrfs_balance_data(struct extent_buffer *eb, +- struct btrfs_balance_item *bi, ++static inline void btrfs_balance_data(const struct extent_buffer *eb, ++ const struct btrfs_balance_item *bi, + struct btrfs_disk_balance_args *ba) + { + read_eb_member(eb, bi, struct btrfs_balance_item, data, 
ba); + } + + static inline void btrfs_set_balance_data(struct extent_buffer *eb, +- struct btrfs_balance_item *bi, +- struct btrfs_disk_balance_args *ba) ++ struct btrfs_balance_item *bi, ++ const struct btrfs_disk_balance_args *ba) + { + write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); + } + +-static inline void btrfs_balance_meta(struct extent_buffer *eb, +- struct btrfs_balance_item *bi, ++static inline void btrfs_balance_meta(const struct extent_buffer *eb, ++ const struct btrfs_balance_item *bi, + struct btrfs_disk_balance_args *ba) + { + read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); + } + + static inline void btrfs_set_balance_meta(struct extent_buffer *eb, +- struct btrfs_balance_item *bi, +- struct btrfs_disk_balance_args *ba) ++ struct btrfs_balance_item *bi, ++ const struct btrfs_disk_balance_args *ba) + { + write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); + } + +-static inline void btrfs_balance_sys(struct extent_buffer *eb, +- struct btrfs_balance_item *bi, ++static inline void btrfs_balance_sys(const struct extent_buffer *eb, ++ const struct btrfs_balance_item *bi, + struct btrfs_disk_balance_args *ba) + { + read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); + } + + static inline void btrfs_set_balance_sys(struct extent_buffer *eb, +- struct btrfs_balance_item *bi, +- struct btrfs_disk_balance_args *ba) ++ struct btrfs_balance_item *bi, ++ const struct btrfs_disk_balance_args *ba) + { + write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); + } + + static inline void + btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, +- struct btrfs_disk_balance_args *disk) ++ const struct btrfs_disk_balance_args *disk) + { + memset(cpu, 0, sizeof(*cpu)); + +@@ -3076,7 +3081,7 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, + + static inline void + btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, +- struct btrfs_balance_args *cpu) ++ const struct btrfs_balance_args *cpu) + { + memset(disk, 0, sizeof(*disk)); + +@@ -3144,7 +3149,7 @@ BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); + BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, + uuid_tree_generation, 64); + +-static inline int btrfs_super_csum_size(struct btrfs_super_block *s) ++static inline int btrfs_super_csum_size(const struct btrfs_super_block *s) + { + u16 t = btrfs_super_csum_type(s); + /* +@@ -3158,6 +3163,21 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l) + return offsetof(struct btrfs_leaf, items); + } + ++/* ++ * The leaf data grows from end-to-front in the node. 
++ * this returns the address of the start of the last item, ++ * which is the stop of the leaf data stack ++ */ ++static inline unsigned int leaf_data_end(const struct btrfs_root *root, ++ const struct extent_buffer *leaf) ++{ ++ u32 nr = btrfs_header_nritems(leaf); ++ ++ if (nr == 0) ++ return BTRFS_LEAF_DATA_SIZE(root); ++ return btrfs_item_offset_nr(leaf, nr - 1); ++} ++ + /* struct btrfs_file_extent_item */ + BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); + BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, +@@ -3174,7 +3194,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression, + struct btrfs_file_extent_item, compression, 8); + + static inline unsigned long +-btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e) ++btrfs_file_extent_inline_start(const struct btrfs_file_extent_item *e) + { + return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START; + } +@@ -3208,8 +3228,9 @@ BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item, + * size of any extent headers. If a file is compressed on disk, this is + * the compressed size + */ +-static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, +- struct btrfs_item *e) ++static inline u32 btrfs_file_extent_inline_item_len( ++ const struct extent_buffer *eb, ++ struct btrfs_item *e) + { + return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; + } +@@ -3217,9 +3238,9 @@ static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, + /* this returns the number of file bytes represented by the inline item. + * If an item is compressed, this is the uncompressed size + */ +-static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, +- int slot, +- struct btrfs_file_extent_item *fi) ++static inline u32 btrfs_file_extent_inline_len(const struct extent_buffer *eb, ++ int slot, ++ const struct btrfs_file_extent_item *fi) + { + struct btrfs_map_token token; + +@@ -3241,8 +3262,8 @@ static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, + + + /* btrfs_dev_stats_item */ +-static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb, +- struct btrfs_dev_stats_item *ptr, ++static inline u64 btrfs_dev_stats_value(const struct extent_buffer *eb, ++ const struct btrfs_dev_stats_item *ptr, + int index) + { + u64 val; +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c +index 176a27bc63aa..81e5bc62e8e3 100644 +--- a/fs/btrfs/dev-replace.c ++++ b/fs/btrfs/dev-replace.c +@@ -620,7 +620,7 @@ static void btrfs_dev_replace_update_device_in_mapping_tree( + em = lookup_extent_mapping(em_tree, start, (u64)-1); + if (!em) + break; +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + for (i = 0; i < map->num_stripes; i++) + if (srcdev == map->stripes[i].dev) + map->stripes[i].dev = tgtdev; +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 1f21c6c33228..f80a0af68736 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -49,6 +49,7 @@ + #include "raid56.h" + #include "sysfs.h" + #include "qgroup.h" ++#include "tree-checker.h" + + #ifdef CONFIG_X86 + #include <asm/cpufeature.h> +@@ -522,72 +523,6 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info, + return ret; + } + +-#define CORRUPT(reason, eb, root, slot) \ +- btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu," \ +- "root=%llu, slot=%d", reason, \ +- btrfs_header_bytenr(eb), root->objectid, slot) +- +-static noinline int check_leaf(struct btrfs_root *root, +- struct extent_buffer *leaf) +-{ +- 
struct btrfs_key key; +- struct btrfs_key leaf_key; +- u32 nritems = btrfs_header_nritems(leaf); +- int slot; +- +- if (nritems == 0) +- return 0; +- +- /* Check the 0 item */ +- if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) != +- BTRFS_LEAF_DATA_SIZE(root)) { +- CORRUPT("invalid item offset size pair", leaf, root, 0); +- return -EIO; +- } +- +- /* +- * Check to make sure each items keys are in the correct order and their +- * offsets make sense. We only have to loop through nritems-1 because +- * we check the current slot against the next slot, which verifies the +- * next slot's offset+size makes sense and that the current's slot +- * offset is correct. +- */ +- for (slot = 0; slot < nritems - 1; slot++) { +- btrfs_item_key_to_cpu(leaf, &leaf_key, slot); +- btrfs_item_key_to_cpu(leaf, &key, slot + 1); +- +- /* Make sure the keys are in the right order */ +- if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) { +- CORRUPT("bad key order", leaf, root, slot); +- return -EIO; +- } +- +- /* +- * Make sure the offset and ends are right, remember that the +- * item data starts at the end of the leaf and grows towards the +- * front. +- */ +- if (btrfs_item_offset_nr(leaf, slot) != +- btrfs_item_end_nr(leaf, slot + 1)) { +- CORRUPT("slot offset bad", leaf, root, slot); +- return -EIO; +- } +- +- /* +- * Check to make sure that we don't point outside of the leaf, +- * just incase all the items are consistent to eachother, but +- * all point outside of the leaf. +- */ +- if (btrfs_item_end_nr(leaf, slot) > +- BTRFS_LEAF_DATA_SIZE(root)) { +- CORRUPT("slot end outside of leaf", leaf, root, slot); +- return -EIO; +- } +- } +- +- return 0; +-} +- + static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, + u64 phy_offset, struct page *page, + u64 start, u64 end, int mirror) +@@ -654,11 +589,14 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, + * that we don't try and read the other copies of this block, just + * return -EIO. + */ +- if (found_level == 0 && check_leaf(root, eb)) { ++ if (found_level == 0 && btrfs_check_leaf_full(root, eb)) { + set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); + ret = -EIO; + } + ++ if (found_level > 0 && btrfs_check_node(root, eb)) ++ ret = -EIO; ++ + if (!ret) + set_extent_buffer_uptodate(eb); + err: +@@ -3958,7 +3896,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf) + buf->len, + root->fs_info->dirty_metadata_batch); + #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY +- if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) { ++ /* ++ * Since btrfs_mark_buffer_dirty() can be called with item pointer set ++ * but item data not updated. ++ * So here we should only check item pointers, not item data. 
++ */ ++ if (btrfs_header_level(buf) == 0 && ++ btrfs_check_leaf_relaxed(root, buf)) { + btrfs_print_leaf(root, buf); + ASSERT(0); + } +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 13ff0fdae03e..978bbfed5a2c 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -2342,7 +2342,13 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, + ins.type = BTRFS_EXTENT_ITEM_KEY; + } + +- BUG_ON(node->ref_mod != 1); ++ if (node->ref_mod != 1) { ++ btrfs_err(root->fs_info, ++ "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu", ++ node->bytenr, node->ref_mod, node->action, ref_root, ++ parent); ++ return -EIO; ++ } + if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { + BUG_ON(!extent_op || !extent_op->update_flags); + ret = alloc_reserved_tree_block(trans, root, +@@ -9481,6 +9487,8 @@ static int find_first_block_group(struct btrfs_root *root, + int ret = 0; + struct btrfs_key found_key; + struct extent_buffer *leaf; ++ struct btrfs_block_group_item bg; ++ u64 flags; + int slot; + + ret = btrfs_search_slot(NULL, root, key, path, 0, 0); +@@ -9502,7 +9510,47 @@ static int find_first_block_group(struct btrfs_root *root, + + if (found_key.objectid >= key->objectid && + found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { +- ret = 0; ++ struct extent_map_tree *em_tree; ++ struct extent_map *em; ++ ++ em_tree = &root->fs_info->mapping_tree.map_tree; ++ read_lock(&em_tree->lock); ++ em = lookup_extent_mapping(em_tree, found_key.objectid, ++ found_key.offset); ++ read_unlock(&em_tree->lock); ++ if (!em) { ++ btrfs_err(root->fs_info, ++ "logical %llu len %llu found bg but no related chunk", ++ found_key.objectid, found_key.offset); ++ ret = -ENOENT; ++ } else if (em->start != found_key.objectid || ++ em->len != found_key.offset) { ++ btrfs_err(root->fs_info, ++ "block group %llu len %llu mismatch with chunk %llu len %llu", ++ found_key.objectid, found_key.offset, ++ em->start, em->len); ++ ret = -EUCLEAN; ++ } else { ++ read_extent_buffer(leaf, &bg, ++ btrfs_item_ptr_offset(leaf, slot), ++ sizeof(bg)); ++ flags = btrfs_block_group_flags(&bg) & ++ BTRFS_BLOCK_GROUP_TYPE_MASK; ++ ++ if (flags != (em->map_lookup->type & ++ BTRFS_BLOCK_GROUP_TYPE_MASK)) { ++ btrfs_err(root->fs_info, ++"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", ++ found_key.objectid, ++ found_key.offset, flags, ++ (BTRFS_BLOCK_GROUP_TYPE_MASK & ++ em->map_lookup->type)); ++ ret = -EUCLEAN; ++ } else { ++ ret = 0; ++ } ++ } ++ free_extent_map(em); + goto out; + } + path->slots[0]++; +@@ -9717,6 +9765,62 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size) + return cache; + } + ++ ++/* ++ * Iterate all chunks and verify that each of them has the corresponding block ++ * group ++ */ ++static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) ++{ ++ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; ++ struct extent_map *em; ++ struct btrfs_block_group_cache *bg; ++ u64 start = 0; ++ int ret = 0; ++ ++ while (1) { ++ read_lock(&map_tree->map_tree.lock); ++ /* ++ * lookup_extent_mapping will return the first extent map ++ * intersecting the range, so setting @len to 1 is enough to ++ * get the first chunk. 
++ */ ++ em = lookup_extent_mapping(&map_tree->map_tree, start, 1); ++ read_unlock(&map_tree->map_tree.lock); ++ if (!em) ++ break; ++ ++ bg = btrfs_lookup_block_group(fs_info, em->start); ++ if (!bg) { ++ btrfs_err(fs_info, ++ "chunk start=%llu len=%llu doesn't have corresponding block group", ++ em->start, em->len); ++ ret = -EUCLEAN; ++ free_extent_map(em); ++ break; ++ } ++ if (bg->key.objectid != em->start || ++ bg->key.offset != em->len || ++ (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != ++ (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { ++ btrfs_err(fs_info, ++"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", ++ em->start, em->len, ++ em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, ++ bg->key.objectid, bg->key.offset, ++ bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); ++ ret = -EUCLEAN; ++ free_extent_map(em); ++ btrfs_put_block_group(bg); ++ break; ++ } ++ start = em->start + em->len; ++ free_extent_map(em); ++ btrfs_put_block_group(bg); ++ } ++ return ret; ++} ++ + int btrfs_read_block_groups(struct btrfs_root *root) + { + struct btrfs_path *path; +@@ -9903,7 +10007,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) + } + + init_global_block_rsv(info); +- ret = 0; ++ ret = check_chunk_block_group_mappings(info); + error: + btrfs_free_path(path); + return ret; +@@ -10388,7 +10492,7 @@ btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info, + * more device items and remove one chunk item), but this is done at + * btrfs_remove_chunk() through a call to check_system_chunk(). + */ +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + num_items = 3 + map->num_stripes; + free_extent_map(em); + +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 88bee6703cc0..42e7f6a8f91d 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -3847,8 +3847,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, + struct block_device *bdev = fs_info->fs_devices->latest_bdev; + struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree; + u64 offset = eb->start; ++ u32 nritems; + unsigned long i, num_pages; + unsigned long bio_flags = 0; ++ unsigned long start, end; + int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META; + int ret = 0; + +@@ -3858,6 +3860,23 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, + if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID) + bio_flags = EXTENT_BIO_TREE_LOG; + ++ /* set btree blocks beyond nritems with 0 to avoid stale content. */ ++ nritems = btrfs_header_nritems(eb); ++ if (btrfs_header_level(eb) > 0) { ++ end = btrfs_node_key_ptr_offset(nritems); ++ ++ memset_extent_buffer(eb, 0, end, eb->len - end); ++ } else { ++ /* ++ * leaf: ++ * header 0 1 2 .. N ... data_N .. 
data_2 data_1 data_0 ++ */ ++ start = btrfs_item_nr_offset(nritems); ++ end = btrfs_leaf_data(eb) + ++ leaf_data_end(fs_info->tree_root, eb); ++ memset_extent_buffer(eb, 0, start, end - start); ++ } ++ + for (i = 0; i < num_pages; i++) { + struct page *p = eb->pages[i]; + +@@ -5362,9 +5381,8 @@ unlock_exit: + return ret; + } + +-void read_extent_buffer(struct extent_buffer *eb, void *dstv, +- unsigned long start, +- unsigned long len) ++void read_extent_buffer(const struct extent_buffer *eb, void *dstv, ++ unsigned long start, unsigned long len) + { + size_t cur; + size_t offset; +@@ -5393,9 +5411,9 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv, + } + } + +-int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv, +- unsigned long start, +- unsigned long len) ++int read_extent_buffer_to_user(const struct extent_buffer *eb, ++ void __user *dstv, ++ unsigned long start, unsigned long len) + { + size_t cur; + size_t offset; +@@ -5430,10 +5448,10 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv, + return ret; + } + +-int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, +- unsigned long min_len, char **map, +- unsigned long *map_start, +- unsigned long *map_len) ++int map_private_extent_buffer(const struct extent_buffer *eb, ++ unsigned long start, unsigned long min_len, ++ char **map, unsigned long *map_start, ++ unsigned long *map_len) + { + size_t offset = start & (PAGE_CACHE_SIZE - 1); + char *kaddr; +@@ -5468,9 +5486,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, + return 0; + } + +-int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, +- unsigned long start, +- unsigned long len) ++int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, ++ unsigned long start, unsigned long len) + { + size_t cur; + size_t offset; +diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h +index f4c1ae11855f..751435967724 100644 +--- a/fs/btrfs/extent_io.h ++++ b/fs/btrfs/extent_io.h +@@ -308,14 +308,13 @@ static inline void extent_buffer_get(struct extent_buffer *eb) + atomic_inc(&eb->refs); + } + +-int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, +- unsigned long start, +- unsigned long len); +-void read_extent_buffer(struct extent_buffer *eb, void *dst, ++int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, ++ unsigned long start, unsigned long len); ++void read_extent_buffer(const struct extent_buffer *eb, void *dst, + unsigned long start, + unsigned long len); +-int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst, +- unsigned long start, ++int read_extent_buffer_to_user(const struct extent_buffer *eb, ++ void __user *dst, unsigned long start, + unsigned long len); + void write_extent_buffer(struct extent_buffer *eb, const void *src, + unsigned long start, unsigned long len); +@@ -334,10 +333,10 @@ int set_extent_buffer_uptodate(struct extent_buffer *eb); + int clear_extent_buffer_uptodate(struct extent_buffer *eb); + int extent_buffer_uptodate(struct extent_buffer *eb); + int extent_buffer_under_io(struct extent_buffer *eb); +-int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, +- unsigned long min_len, char **map, +- unsigned long *map_start, +- unsigned long *map_len); ++int map_private_extent_buffer(const struct extent_buffer *eb, ++ unsigned long offset, unsigned long min_len, ++ char **map, unsigned long *map_start, ++ unsigned long *map_len); + int 
extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); + int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); + int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, +diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c +index 6a98bddd8f33..84fb56d5c018 100644 +--- a/fs/btrfs/extent_map.c ++++ b/fs/btrfs/extent_map.c +@@ -76,7 +76,7 @@ void free_extent_map(struct extent_map *em) + WARN_ON(extent_map_in_tree(em)); + WARN_ON(!list_empty(&em->list)); + if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) +- kfree(em->bdev); ++ kfree(em->map_lookup); + kmem_cache_free(extent_map_cache, em); + } + } +diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h +index b2991fd8583e..eb8b8fae036b 100644 +--- a/fs/btrfs/extent_map.h ++++ b/fs/btrfs/extent_map.h +@@ -32,7 +32,15 @@ struct extent_map { + u64 block_len; + u64 generation; + unsigned long flags; +- struct block_device *bdev; ++ union { ++ struct block_device *bdev; ++ ++ /* ++ * used for chunk mappings ++ * flags & EXTENT_FLAG_FS_MAPPING must be set ++ */ ++ struct map_lookup *map_lookup; ++ }; + atomic_t refs; + unsigned int compress_type; + struct list_head list; +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c +index 6dca9f937bf6..cc9ccc42f469 100644 +--- a/fs/btrfs/scrub.c ++++ b/fs/btrfs/scrub.c +@@ -3460,7 +3460,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, + return ret; + } + +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + if (em->start != chunk_offset) + goto out; + +diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c +index b976597b0721..63ffd213b0b7 100644 +--- a/fs/btrfs/struct-funcs.c ++++ b/fs/btrfs/struct-funcs.c +@@ -50,8 +50,8 @@ static inline void put_unaligned_le8(u8 val, void *p) + */ + + #define DEFINE_BTRFS_SETGET_BITS(bits) \ +-u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ +- unsigned long off, \ ++u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ ++ const void *ptr, unsigned long off, \ + struct btrfs_map_token *token) \ + { \ + unsigned long part_offset = (unsigned long)ptr; \ +@@ -90,7 +90,8 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ + return res; \ + } \ + void btrfs_set_token_##bits(struct extent_buffer *eb, \ +- void *ptr, unsigned long off, u##bits val, \ ++ const void *ptr, unsigned long off, \ ++ u##bits val, \ + struct btrfs_map_token *token) \ + { \ + unsigned long part_offset = (unsigned long)ptr; \ +@@ -133,7 +134,7 @@ DEFINE_BTRFS_SETGET_BITS(16) + DEFINE_BTRFS_SETGET_BITS(32) + DEFINE_BTRFS_SETGET_BITS(64) + +-void btrfs_node_key(struct extent_buffer *eb, ++void btrfs_node_key(const struct extent_buffer *eb, + struct btrfs_disk_key *disk_key, int nr) + { + unsigned long ptr = btrfs_node_key_ptr_offset(nr); +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c +new file mode 100644 +index 000000000000..5b98f3c76ce4 +--- /dev/null ++++ b/fs/btrfs/tree-checker.c +@@ -0,0 +1,649 @@ ++/* ++ * Copyright (C) Qu Wenruo 2017. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public ++ * License v2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public ++ * License along with this program. ++ */ ++ ++/* ++ * The module is used to catch unexpected/corrupted tree block data. ++ * Such behavior can be caused either by a fuzzed image or bugs. ++ * ++ * The objective is to do leaf/node validation checks when tree block is read ++ * from disk, and check *every* possible member, so other code won't ++ * need to checking them again. ++ * ++ * Due to the potential and unwanted damage, every checker needs to be ++ * carefully reviewed otherwise so it does not prevent mount of valid images. ++ */ ++ ++#include "ctree.h" ++#include "tree-checker.h" ++#include "disk-io.h" ++#include "compression.h" ++#include "hash.h" ++#include "volumes.h" ++ ++#define CORRUPT(reason, eb, root, slot) \ ++ btrfs_crit(root->fs_info, \ ++ "corrupt %s, %s: block=%llu, root=%llu, slot=%d", \ ++ btrfs_header_level(eb) == 0 ? "leaf" : "node", \ ++ reason, btrfs_header_bytenr(eb), root->objectid, slot) ++ ++/* ++ * Error message should follow the following format: ++ * corrupt <type>: <identifier>, <reason>[, <bad_value>] ++ * ++ * @type: leaf or node ++ * @identifier: the necessary info to locate the leaf/node. ++ * It's recommened to decode key.objecitd/offset if it's ++ * meaningful. ++ * @reason: describe the error ++ * @bad_value: optional, it's recommened to output bad value and its ++ * expected value (range). ++ * ++ * Since comma is used to separate the components, only space is allowed ++ * inside each component. ++ */ ++ ++/* ++ * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt. ++ * Allows callers to customize the output. ++ */ ++__printf(4, 5) ++static void generic_err(const struct btrfs_root *root, ++ const struct extent_buffer *eb, int slot, ++ const char *fmt, ...) ++{ ++ struct va_format vaf; ++ va_list args; ++ ++ va_start(args, fmt); ++ ++ vaf.fmt = fmt; ++ vaf.va = &args; ++ ++ btrfs_crit(root->fs_info, ++ "corrupt %s: root=%llu block=%llu slot=%d, %pV", ++ btrfs_header_level(eb) == 0 ? "leaf" : "node", ++ root->objectid, btrfs_header_bytenr(eb), slot, &vaf); ++ va_end(args); ++} ++ ++static int check_extent_data_item(struct btrfs_root *root, ++ struct extent_buffer *leaf, ++ struct btrfs_key *key, int slot) ++{ ++ struct btrfs_file_extent_item *fi; ++ u32 sectorsize = root->sectorsize; ++ u32 item_size = btrfs_item_size_nr(leaf, slot); ++ ++ if (!IS_ALIGNED(key->offset, sectorsize)) { ++ CORRUPT("unaligned key offset for file extent", ++ leaf, root, slot); ++ return -EUCLEAN; ++ } ++ ++ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); ++ ++ if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) { ++ CORRUPT("invalid file extent type", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ ++ /* ++ * Support for new compression/encrption must introduce incompat flag, ++ * and must be caught in open_ctree(). 
++ */ ++ if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) { ++ CORRUPT("invalid file extent compression", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ if (btrfs_file_extent_encryption(leaf, fi)) { ++ CORRUPT("invalid file extent encryption", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) { ++ /* Inline extent must have 0 as key offset */ ++ if (key->offset) { ++ CORRUPT("inline extent has non-zero key offset", ++ leaf, root, slot); ++ return -EUCLEAN; ++ } ++ ++ /* Compressed inline extent has no on-disk size, skip it */ ++ if (btrfs_file_extent_compression(leaf, fi) != ++ BTRFS_COMPRESS_NONE) ++ return 0; ++ ++ /* Uncompressed inline extent size must match item size */ ++ if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START + ++ btrfs_file_extent_ram_bytes(leaf, fi)) { ++ CORRUPT("plaintext inline extent has invalid size", ++ leaf, root, slot); ++ return -EUCLEAN; ++ } ++ return 0; ++ } ++ ++ /* Regular or preallocated extent has fixed item size */ ++ if (item_size != sizeof(*fi)) { ++ CORRUPT( ++ "regluar or preallocated extent data item size is invalid", ++ leaf, root, slot); ++ return -EUCLEAN; ++ } ++ if (!IS_ALIGNED(btrfs_file_extent_ram_bytes(leaf, fi), sectorsize) || ++ !IS_ALIGNED(btrfs_file_extent_disk_bytenr(leaf, fi), sectorsize) || ++ !IS_ALIGNED(btrfs_file_extent_disk_num_bytes(leaf, fi), sectorsize) || ++ !IS_ALIGNED(btrfs_file_extent_offset(leaf, fi), sectorsize) || ++ !IS_ALIGNED(btrfs_file_extent_num_bytes(leaf, fi), sectorsize)) { ++ CORRUPT( ++ "regular or preallocated extent data item has unaligned value", ++ leaf, root, slot); ++ return -EUCLEAN; ++ } ++ ++ return 0; ++} ++ ++static int check_csum_item(struct btrfs_root *root, struct extent_buffer *leaf, ++ struct btrfs_key *key, int slot) ++{ ++ u32 sectorsize = root->sectorsize; ++ u32 csumsize = btrfs_super_csum_size(root->fs_info->super_copy); ++ ++ if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) { ++ CORRUPT("invalid objectid for csum item", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ if (!IS_ALIGNED(key->offset, sectorsize)) { ++ CORRUPT("unaligned key offset for csum item", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) { ++ CORRUPT("unaligned csum item size", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ return 0; ++} ++ ++/* ++ * Customized reported for dir_item, only important new info is key->objectid, ++ * which represents inode number ++ */ ++__printf(4, 5) ++static void dir_item_err(const struct btrfs_root *root, ++ const struct extent_buffer *eb, int slot, ++ const char *fmt, ...) ++{ ++ struct btrfs_key key; ++ struct va_format vaf; ++ va_list args; ++ ++ btrfs_item_key_to_cpu(eb, &key, slot); ++ va_start(args, fmt); ++ ++ vaf.fmt = fmt; ++ vaf.va = &args; ++ ++ btrfs_crit(root->fs_info, ++ "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV", ++ btrfs_header_level(eb) == 0 ? 
"leaf" : "node", root->objectid, ++ btrfs_header_bytenr(eb), slot, key.objectid, &vaf); ++ va_end(args); ++} ++ ++static int check_dir_item(struct btrfs_root *root, ++ struct extent_buffer *leaf, ++ struct btrfs_key *key, int slot) ++{ ++ struct btrfs_dir_item *di; ++ u32 item_size = btrfs_item_size_nr(leaf, slot); ++ u32 cur = 0; ++ ++ di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); ++ while (cur < item_size) { ++ u32 name_len; ++ u32 data_len; ++ u32 max_name_len; ++ u32 total_size; ++ u32 name_hash; ++ u8 dir_type; ++ ++ /* header itself should not cross item boundary */ ++ if (cur + sizeof(*di) > item_size) { ++ dir_item_err(root, leaf, slot, ++ "dir item header crosses item boundary, have %zu boundary %u", ++ cur + sizeof(*di), item_size); ++ return -EUCLEAN; ++ } ++ ++ /* dir type check */ ++ dir_type = btrfs_dir_type(leaf, di); ++ if (dir_type >= BTRFS_FT_MAX) { ++ dir_item_err(root, leaf, slot, ++ "invalid dir item type, have %u expect [0, %u)", ++ dir_type, BTRFS_FT_MAX); ++ return -EUCLEAN; ++ } ++ ++ if (key->type == BTRFS_XATTR_ITEM_KEY && ++ dir_type != BTRFS_FT_XATTR) { ++ dir_item_err(root, leaf, slot, ++ "invalid dir item type for XATTR key, have %u expect %u", ++ dir_type, BTRFS_FT_XATTR); ++ return -EUCLEAN; ++ } ++ if (dir_type == BTRFS_FT_XATTR && ++ key->type != BTRFS_XATTR_ITEM_KEY) { ++ dir_item_err(root, leaf, slot, ++ "xattr dir type found for non-XATTR key"); ++ return -EUCLEAN; ++ } ++ if (dir_type == BTRFS_FT_XATTR) ++ max_name_len = XATTR_NAME_MAX; ++ else ++ max_name_len = BTRFS_NAME_LEN; ++ ++ /* Name/data length check */ ++ name_len = btrfs_dir_name_len(leaf, di); ++ data_len = btrfs_dir_data_len(leaf, di); ++ if (name_len > max_name_len) { ++ dir_item_err(root, leaf, slot, ++ "dir item name len too long, have %u max %u", ++ name_len, max_name_len); ++ return -EUCLEAN; ++ } ++ if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) { ++ dir_item_err(root, leaf, slot, ++ "dir item name and data len too long, have %u max %zu", ++ name_len + data_len, ++ BTRFS_MAX_XATTR_SIZE(root)); ++ return -EUCLEAN; ++ } ++ ++ if (data_len && dir_type != BTRFS_FT_XATTR) { ++ dir_item_err(root, leaf, slot, ++ "dir item with invalid data len, have %u expect 0", ++ data_len); ++ return -EUCLEAN; ++ } ++ ++ total_size = sizeof(*di) + name_len + data_len; ++ ++ /* header and name/data should not cross item boundary */ ++ if (cur + total_size > item_size) { ++ dir_item_err(root, leaf, slot, ++ "dir item data crosses item boundary, have %u boundary %u", ++ cur + total_size, item_size); ++ return -EUCLEAN; ++ } ++ ++ /* ++ * Special check for XATTR/DIR_ITEM, as key->offset is name ++ * hash, should match its name ++ */ ++ if (key->type == BTRFS_DIR_ITEM_KEY || ++ key->type == BTRFS_XATTR_ITEM_KEY) { ++ char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)]; ++ ++ read_extent_buffer(leaf, namebuf, ++ (unsigned long)(di + 1), name_len); ++ name_hash = btrfs_name_hash(namebuf, name_len); ++ if (key->offset != name_hash) { ++ dir_item_err(root, leaf, slot, ++ "name hash mismatch with key, have 0x%016x expect 0x%016llx", ++ name_hash, key->offset); ++ return -EUCLEAN; ++ } ++ } ++ cur += total_size; ++ di = (struct btrfs_dir_item *)((void *)di + total_size); ++ } ++ return 0; ++} ++ ++__printf(4, 5) ++__cold ++static void block_group_err(const struct btrfs_fs_info *fs_info, ++ const struct extent_buffer *eb, int slot, ++ const char *fmt, ...) 
++{ ++ struct btrfs_key key; ++ struct va_format vaf; ++ va_list args; ++ ++ btrfs_item_key_to_cpu(eb, &key, slot); ++ va_start(args, fmt); ++ ++ vaf.fmt = fmt; ++ vaf.va = &args; ++ ++ btrfs_crit(fs_info, ++ "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV", ++ btrfs_header_level(eb) == 0 ? "leaf" : "node", ++ btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, ++ key.objectid, key.offset, &vaf); ++ va_end(args); ++} ++ ++static int check_block_group_item(struct btrfs_fs_info *fs_info, ++ struct extent_buffer *leaf, ++ struct btrfs_key *key, int slot) ++{ ++ struct btrfs_block_group_item bgi; ++ u32 item_size = btrfs_item_size_nr(leaf, slot); ++ u64 flags; ++ u64 type; ++ ++ /* ++ * Here we don't really care about alignment since extent allocator can ++ * handle it. We care more about the size, as if one block group is ++ * larger than maximum size, it's must be some obvious corruption. ++ */ ++ if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) { ++ block_group_err(fs_info, leaf, slot, ++ "invalid block group size, have %llu expect (0, %llu]", ++ key->offset, BTRFS_MAX_DATA_CHUNK_SIZE); ++ return -EUCLEAN; ++ } ++ ++ if (item_size != sizeof(bgi)) { ++ block_group_err(fs_info, leaf, slot, ++ "invalid item size, have %u expect %zu", ++ item_size, sizeof(bgi)); ++ return -EUCLEAN; ++ } ++ ++ read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), ++ sizeof(bgi)); ++ if (btrfs_block_group_chunk_objectid(&bgi) != ++ BTRFS_FIRST_CHUNK_TREE_OBJECTID) { ++ block_group_err(fs_info, leaf, slot, ++ "invalid block group chunk objectid, have %llu expect %llu", ++ btrfs_block_group_chunk_objectid(&bgi), ++ BTRFS_FIRST_CHUNK_TREE_OBJECTID); ++ return -EUCLEAN; ++ } ++ ++ if (btrfs_block_group_used(&bgi) > key->offset) { ++ block_group_err(fs_info, leaf, slot, ++ "invalid block group used, have %llu expect [0, %llu)", ++ btrfs_block_group_used(&bgi), key->offset); ++ return -EUCLEAN; ++ } ++ ++ flags = btrfs_block_group_flags(&bgi); ++ if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) { ++ block_group_err(fs_info, leaf, slot, ++"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set", ++ flags & BTRFS_BLOCK_GROUP_PROFILE_MASK, ++ hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)); ++ return -EUCLEAN; ++ } ++ ++ type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; ++ if (type != BTRFS_BLOCK_GROUP_DATA && ++ type != BTRFS_BLOCK_GROUP_METADATA && ++ type != BTRFS_BLOCK_GROUP_SYSTEM && ++ type != (BTRFS_BLOCK_GROUP_METADATA | ++ BTRFS_BLOCK_GROUP_DATA)) { ++ block_group_err(fs_info, leaf, slot, ++"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx", ++ type, hweight64(type), ++ BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA, ++ BTRFS_BLOCK_GROUP_SYSTEM, ++ BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA); ++ return -EUCLEAN; ++ } ++ return 0; ++} ++ ++/* ++ * Common point to switch the item-specific validation. 
++ */ ++static int check_leaf_item(struct btrfs_root *root, ++ struct extent_buffer *leaf, ++ struct btrfs_key *key, int slot) ++{ ++ int ret = 0; ++ ++ switch (key->type) { ++ case BTRFS_EXTENT_DATA_KEY: ++ ret = check_extent_data_item(root, leaf, key, slot); ++ break; ++ case BTRFS_EXTENT_CSUM_KEY: ++ ret = check_csum_item(root, leaf, key, slot); ++ break; ++ case BTRFS_DIR_ITEM_KEY: ++ case BTRFS_DIR_INDEX_KEY: ++ case BTRFS_XATTR_ITEM_KEY: ++ ret = check_dir_item(root, leaf, key, slot); ++ break; ++ case BTRFS_BLOCK_GROUP_ITEM_KEY: ++ ret = check_block_group_item(root->fs_info, leaf, key, slot); ++ break; ++ } ++ return ret; ++} ++ ++static int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf, ++ bool check_item_data) ++{ ++ struct btrfs_fs_info *fs_info = root->fs_info; ++ /* No valid key type is 0, so all key should be larger than this key */ ++ struct btrfs_key prev_key = {0, 0, 0}; ++ struct btrfs_key key; ++ u32 nritems = btrfs_header_nritems(leaf); ++ int slot; ++ ++ if (btrfs_header_level(leaf) != 0) { ++ generic_err(root, leaf, 0, ++ "invalid level for leaf, have %d expect 0", ++ btrfs_header_level(leaf)); ++ return -EUCLEAN; ++ } ++ ++ /* ++ * Extent buffers from a relocation tree have a owner field that ++ * corresponds to the subvolume tree they are based on. So just from an ++ * extent buffer alone we can not find out what is the id of the ++ * corresponding subvolume tree, so we can not figure out if the extent ++ * buffer corresponds to the root of the relocation tree or not. So ++ * skip this check for relocation trees. ++ */ ++ if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) { ++ u64 owner = btrfs_header_owner(leaf); ++ struct btrfs_root *check_root; ++ ++ /* These trees must never be empty */ ++ if (owner == BTRFS_ROOT_TREE_OBJECTID || ++ owner == BTRFS_CHUNK_TREE_OBJECTID || ++ owner == BTRFS_EXTENT_TREE_OBJECTID || ++ owner == BTRFS_DEV_TREE_OBJECTID || ++ owner == BTRFS_FS_TREE_OBJECTID || ++ owner == BTRFS_DATA_RELOC_TREE_OBJECTID) { ++ generic_err(root, leaf, 0, ++ "invalid root, root %llu must never be empty", ++ owner); ++ return -EUCLEAN; ++ } ++ key.objectid = owner; ++ key.type = BTRFS_ROOT_ITEM_KEY; ++ key.offset = (u64)-1; ++ ++ check_root = btrfs_get_fs_root(fs_info, &key, false); ++ /* ++ * The only reason we also check NULL here is that during ++ * open_ctree() some roots has not yet been set up. ++ */ ++ if (!IS_ERR_OR_NULL(check_root)) { ++ struct extent_buffer *eb; ++ ++ eb = btrfs_root_node(check_root); ++ /* if leaf is the root, then it's fine */ ++ if (leaf != eb) { ++ CORRUPT("non-root leaf's nritems is 0", ++ leaf, check_root, 0); ++ free_extent_buffer(eb); ++ return -EUCLEAN; ++ } ++ free_extent_buffer(eb); ++ } ++ return 0; ++ } ++ ++ if (nritems == 0) ++ return 0; ++ ++ /* ++ * Check the following things to make sure this is a good leaf, and ++ * leaf users won't need to bother with similar sanity checks: ++ * ++ * 1) key ordering ++ * 2) item offset and size ++ * No overlap, no hole, all inside the leaf. ++ * 3) item content ++ * If possible, do comprehensive sanity check. ++ * NOTE: All checks must only rely on the item data itself. 
++ */ ++ for (slot = 0; slot < nritems; slot++) { ++ u32 item_end_expected; ++ int ret; ++ ++ btrfs_item_key_to_cpu(leaf, &key, slot); ++ ++ /* Make sure the keys are in the right order */ ++ if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) { ++ CORRUPT("bad key order", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ ++ /* ++ * Make sure the offset and ends are right, remember that the ++ * item data starts at the end of the leaf and grows towards the ++ * front. ++ */ ++ if (slot == 0) ++ item_end_expected = BTRFS_LEAF_DATA_SIZE(root); ++ else ++ item_end_expected = btrfs_item_offset_nr(leaf, ++ slot - 1); ++ if (btrfs_item_end_nr(leaf, slot) != item_end_expected) { ++ CORRUPT("slot offset bad", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ ++ /* ++ * Check to make sure that we don't point outside of the leaf, ++ * just in case all the items are consistent to each other, but ++ * all point outside of the leaf. ++ */ ++ if (btrfs_item_end_nr(leaf, slot) > ++ BTRFS_LEAF_DATA_SIZE(root)) { ++ CORRUPT("slot end outside of leaf", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ ++ /* Also check if the item pointer overlaps with btrfs item. */ ++ if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) > ++ btrfs_item_ptr_offset(leaf, slot)) { ++ CORRUPT("slot overlap with its data", leaf, root, slot); ++ return -EUCLEAN; ++ } ++ ++ if (check_item_data) { ++ /* ++ * Check if the item size and content meet other ++ * criteria ++ */ ++ ret = check_leaf_item(root, leaf, &key, slot); ++ if (ret < 0) ++ return ret; ++ } ++ ++ prev_key.objectid = key.objectid; ++ prev_key.type = key.type; ++ prev_key.offset = key.offset; ++ } ++ ++ return 0; ++} ++ ++int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf) ++{ ++ return check_leaf(root, leaf, true); ++} ++ ++int btrfs_check_leaf_relaxed(struct btrfs_root *root, ++ struct extent_buffer *leaf) ++{ ++ return check_leaf(root, leaf, false); ++} ++ ++int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node) ++{ ++ unsigned long nr = btrfs_header_nritems(node); ++ struct btrfs_key key, next_key; ++ int slot; ++ int level = btrfs_header_level(node); ++ u64 bytenr; ++ int ret = 0; ++ ++ if (level <= 0 || level >= BTRFS_MAX_LEVEL) { ++ generic_err(root, node, 0, ++ "invalid level for node, have %d expect [1, %d]", ++ level, BTRFS_MAX_LEVEL - 1); ++ return -EUCLEAN; ++ } ++ if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) { ++ btrfs_crit(root->fs_info, ++"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%zu]", ++ root->objectid, node->start, ++ nr == 0 ? 
"small" : "large", nr, ++ BTRFS_NODEPTRS_PER_BLOCK(root)); ++ return -EUCLEAN; ++ } ++ ++ for (slot = 0; slot < nr - 1; slot++) { ++ bytenr = btrfs_node_blockptr(node, slot); ++ btrfs_node_key_to_cpu(node, &key, slot); ++ btrfs_node_key_to_cpu(node, &next_key, slot + 1); ++ ++ if (!bytenr) { ++ generic_err(root, node, slot, ++ "invalid NULL node pointer"); ++ ret = -EUCLEAN; ++ goto out; ++ } ++ if (!IS_ALIGNED(bytenr, root->sectorsize)) { ++ generic_err(root, node, slot, ++ "unaligned pointer, have %llu should be aligned to %u", ++ bytenr, root->sectorsize); ++ ret = -EUCLEAN; ++ goto out; ++ } ++ ++ if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) { ++ generic_err(root, node, slot, ++ "bad key order, current (%llu %u %llu) next (%llu %u %llu)", ++ key.objectid, key.type, key.offset, ++ next_key.objectid, next_key.type, ++ next_key.offset); ++ ret = -EUCLEAN; ++ goto out; ++ } ++ } ++out: ++ return ret; ++} +diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h +new file mode 100644 +index 000000000000..3d53e8d6fda0 +--- /dev/null ++++ b/fs/btrfs/tree-checker.h +@@ -0,0 +1,38 @@ ++/* ++ * Copyright (C) Qu Wenruo 2017. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public ++ * License v2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public ++ * License along with this program. ++ */ ++ ++#ifndef __BTRFS_TREE_CHECKER__ ++#define __BTRFS_TREE_CHECKER__ ++ ++#include "ctree.h" ++#include "extent_io.h" ++ ++/* ++ * Comprehensive leaf checker. ++ * Will check not only the item pointers, but also every possible member ++ * in item data. ++ */ ++int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf); ++ ++/* ++ * Less strict leaf checker. ++ * Will only check item pointers, not reading item data. 
++ */ ++int btrfs_check_leaf_relaxed(struct btrfs_root *root, ++ struct extent_buffer *leaf); ++int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node); ++ ++#endif +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index b4d63a9842fa..5e8fe8f3942d 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -1184,7 +1184,7 @@ again: + struct map_lookup *map; + int i; + +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + for (i = 0; i < map->num_stripes; i++) { + u64 end; + +@@ -2757,7 +2757,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, + free_extent_map(em); + return -EINVAL; + } +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + lock_chunks(root->fs_info->chunk_root); + check_system_chunk(trans, extent_root, map->type); + unlock_chunks(root->fs_info->chunk_root); +@@ -4540,7 +4540,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + + if (type & BTRFS_BLOCK_GROUP_DATA) { + max_stripe_size = 1024 * 1024 * 1024; +- max_chunk_size = 10 * max_stripe_size; ++ max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; + if (!devs_max) + devs_max = BTRFS_MAX_DEVS(info->chunk_root); + } else if (type & BTRFS_BLOCK_GROUP_METADATA) { +@@ -4731,7 +4731,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + goto error; + } + set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); +- em->bdev = (struct block_device *)map; ++ em->map_lookup = map; + em->start = start; + em->len = num_bytes; + em->block_start = 0; +@@ -4826,7 +4826,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, + return -EINVAL; + } + +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + item_size = btrfs_chunk_item_size(map->num_stripes); + stripe_size = em->orig_block_len; + +@@ -4968,7 +4968,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) + if (!em) + return 1; + +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + for (i = 0; i < map->num_stripes; i++) { + if (map->stripes[i].dev->missing) { + miss_ndevs++; +@@ -5048,7 +5048,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) + return 1; + } + +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1)) + ret = map->num_stripes; + else if (map->type & BTRFS_BLOCK_GROUP_RAID10) +@@ -5091,7 +5091,7 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root, + BUG_ON(!em); + + BUG_ON(em->start > logical || em->start + em->len < logical); +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) + len = map->stripe_len * nr_data_stripes(map); + free_extent_map(em); +@@ -5112,7 +5112,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree, + BUG_ON(!em); + + BUG_ON(em->start > logical || em->start + em->len < logical); +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) + ret = 1; + free_extent_map(em); +@@ -5271,7 +5271,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, + return -EINVAL; + } + +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + offset = logical - em->start; + + stripe_len = map->stripe_len; +@@ -5813,7 +5813,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, + free_extent_map(em); + return -EIO; + } +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + + length = em->len; + rmap_len = map->stripe_len; +@@ -6208,6 +6208,101 @@ 
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, + return dev; + } + ++/* Return -EIO if any error, otherwise return 0. */ ++static int btrfs_check_chunk_valid(struct btrfs_root *root, ++ struct extent_buffer *leaf, ++ struct btrfs_chunk *chunk, u64 logical) ++{ ++ u64 length; ++ u64 stripe_len; ++ u16 num_stripes; ++ u16 sub_stripes; ++ u64 type; ++ u64 features; ++ bool mixed = false; ++ ++ length = btrfs_chunk_length(leaf, chunk); ++ stripe_len = btrfs_chunk_stripe_len(leaf, chunk); ++ num_stripes = btrfs_chunk_num_stripes(leaf, chunk); ++ sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); ++ type = btrfs_chunk_type(leaf, chunk); ++ ++ if (!num_stripes) { ++ btrfs_err(root->fs_info, "invalid chunk num_stripes: %u", ++ num_stripes); ++ return -EIO; ++ } ++ if (!IS_ALIGNED(logical, root->sectorsize)) { ++ btrfs_err(root->fs_info, ++ "invalid chunk logical %llu", logical); ++ return -EIO; ++ } ++ if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) { ++ btrfs_err(root->fs_info, "invalid chunk sectorsize %u", ++ btrfs_chunk_sector_size(leaf, chunk)); ++ return -EIO; ++ } ++ if (!length || !IS_ALIGNED(length, root->sectorsize)) { ++ btrfs_err(root->fs_info, ++ "invalid chunk length %llu", length); ++ return -EIO; ++ } ++ if (!is_power_of_2(stripe_len)) { ++ btrfs_err(root->fs_info, "invalid chunk stripe length: %llu", ++ stripe_len); ++ return -EIO; ++ } ++ if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & ++ type) { ++ btrfs_err(root->fs_info, "unrecognized chunk type: %llu", ++ ~(BTRFS_BLOCK_GROUP_TYPE_MASK | ++ BTRFS_BLOCK_GROUP_PROFILE_MASK) & ++ btrfs_chunk_type(leaf, chunk)); ++ return -EIO; ++ } ++ ++ if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) { ++ btrfs_err(root->fs_info, "missing chunk type flag: 0x%llx", type); ++ return -EIO; ++ } ++ ++ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) && ++ (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) { ++ btrfs_err(root->fs_info, ++ "system chunk with data or metadata type: 0x%llx", type); ++ return -EIO; ++ } ++ ++ features = btrfs_super_incompat_flags(root->fs_info->super_copy); ++ if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) ++ mixed = true; ++ ++ if (!mixed) { ++ if ((type & BTRFS_BLOCK_GROUP_METADATA) && ++ (type & BTRFS_BLOCK_GROUP_DATA)) { ++ btrfs_err(root->fs_info, ++ "mixed chunk type in non-mixed mode: 0x%llx", type); ++ return -EIO; ++ } ++ } ++ ++ if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || ++ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) || ++ (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || ++ (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || ++ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) || ++ ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && ++ num_stripes != 1)) { ++ btrfs_err(root->fs_info, ++ "invalid num_stripes:sub_stripes %u:%u for profile %llu", ++ num_stripes, sub_stripes, ++ type & BTRFS_BLOCK_GROUP_PROFILE_MASK); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ + static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, + struct extent_buffer *leaf, + struct btrfs_chunk *chunk) +@@ -6217,6 +6312,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, + struct extent_map *em; + u64 logical; + u64 length; ++ u64 stripe_len; + u64 devid; + u8 uuid[BTRFS_UUID_SIZE]; + int num_stripes; +@@ -6225,6 +6321,12 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, + + logical = key->offset; + length = btrfs_chunk_length(leaf, chunk); ++ stripe_len = 
btrfs_chunk_stripe_len(leaf, chunk); ++ num_stripes = btrfs_chunk_num_stripes(leaf, chunk); ++ ++ ret = btrfs_check_chunk_valid(root, leaf, chunk, logical); ++ if (ret) ++ return ret; + + read_lock(&map_tree->map_tree.lock); + em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); +@@ -6241,7 +6343,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, + em = alloc_extent_map(); + if (!em) + return -ENOMEM; +- num_stripes = btrfs_chunk_num_stripes(leaf, chunk); + map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); + if (!map) { + free_extent_map(em); +@@ -6249,7 +6350,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, + } + + set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); +- em->bdev = (struct block_device *)map; ++ em->map_lookup = map; + em->start = logical; + em->len = length; + em->orig_start = 0; +@@ -6473,6 +6574,7 @@ int btrfs_read_sys_array(struct btrfs_root *root) + u32 array_size; + u32 len = 0; + u32 cur_offset; ++ u64 type; + struct btrfs_key key; + + ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); +@@ -6539,6 +6641,15 @@ int btrfs_read_sys_array(struct btrfs_root *root) + break; + } + ++ type = btrfs_chunk_type(sb, chunk); ++ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { ++ btrfs_err(root->fs_info, ++ "invalid chunk type %llu in sys_array at offset %u", ++ type, cur_offset); ++ ret = -EIO; ++ break; ++ } ++ + len = btrfs_chunk_item_size(num_stripes); + if (cur_offset + len > array_size) + goto out_short_read; +@@ -6948,7 +7059,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root, + /* In order to kick the device replace finish process */ + lock_chunks(root); + list_for_each_entry(em, &transaction->pending_chunks, list) { +- map = (struct map_lookup *)em->bdev; ++ map = em->map_lookup; + + for (i = 0; i < map->num_stripes; i++) { + dev = map->stripes[i].dev; +diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h +index d5c84f6b1353..3c651df420be 100644 +--- a/fs/btrfs/volumes.h ++++ b/fs/btrfs/volumes.h +@@ -24,6 +24,8 @@ + #include <linux/btrfs.h> + #include "async-thread.h" + ++#define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G) ++ + extern struct mutex uuid_mutex; + + #define BTRFS_STRIPE_LEN (64 * 1024) +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 0141aba9eca6..026b399af215 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -1073,10 +1073,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) + + /* + * Accessing maxBuf is racy with cifs_reconnect - need to store value +- * and check it for zero before using. ++ * and check it before using. + */ + max_buf = tcon->ses->server->maxBuf; +- if (!max_buf) { ++ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) { + free_xid(xid); + return -EINVAL; + } +@@ -1404,10 +1404,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, + + /* + * Accessing maxBuf is racy with cifs_reconnect - need to store value +- * and check it for zero before using. ++ * and check it before using. 
+ */ + max_buf = tcon->ses->server->maxBuf; +- if (!max_buf) ++ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) + return -EINVAL; + + max_num = (max_buf - sizeof(struct smb_hdr)) / +diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c +index b2aff0c6f22c..b7885dc0d9bb 100644 +--- a/fs/cifs/smb2file.c ++++ b/fs/cifs/smb2file.c +@@ -123,10 +123,10 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, + + /* + * Accessing maxBuf is racy with cifs_reconnect - need to store value +- * and check it for zero before using. ++ * and check it before using. + */ + max_buf = tcon->ses->server->maxBuf; +- if (!max_buf) ++ if (max_buf < sizeof(struct smb2_lock_element)) + return -EINVAL; + + max_num = max_buf / sizeof(struct smb2_lock_element); +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c +index 54af10204e83..1cf0a336ec06 100644 +--- a/fs/cifs/transport.c ++++ b/fs/cifs/transport.c +@@ -360,7 +360,7 @@ uncork: + if (rc < 0 && rc != -EINTR) + cifs_dbg(VFS, "Error %d sending data on socket to server\n", + rc); +- else ++ else if (rc > 0) + rc = 0; + + return rc; +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index 46d4fac48cf4..0dcd33f62637 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -1861,12 +1861,12 @@ int ext4_inline_data_fiemap(struct inode *inode, + physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data; + physical += offsetof(struct ext4_inode, i_block); + +- if (physical) +- error = fiemap_fill_next_extent(fieinfo, start, physical, +- inline_len, flags); + brelse(iloc.bh); + out: + up_read(&EXT4_I(inode)->xattr_sem); ++ if (physical) ++ error = fiemap_fill_next_extent(fieinfo, start, physical, ++ inline_len, flags); + return (error < 0 ? error : 0); + } + +diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h +index cc0fc712bb82..a8ac3f25b4ec 100644 +--- a/include/linux/sunrpc/svc.h ++++ b/include/linux/sunrpc/svc.h +@@ -290,9 +290,12 @@ struct svc_rqst { + struct svc_cacherep * rq_cacherep; /* cache info */ + struct task_struct *rq_task; /* service thread */ + spinlock_t rq_lock; /* per-request lock */ ++ struct net *rq_bc_net; /* pointer to backchannel's ++ * net namespace ++ */ + }; + +-#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net) ++#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net) + + /* + * Rigorous type checking on sockaddr type conversions +diff --git a/mm/slab.c b/mm/slab.c +index fa49c01225a7..92df044f5e00 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -875,8 +875,10 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries, + struct alien_cache *alc = NULL; + + alc = kmalloc_node(memsize, gfp, node); +- init_arraycache(&alc->ac, entries, batch); +- spin_lock_init(&alc->lock); ++ if (alc) { ++ init_arraycache(&alc->ac, entries, batch); ++ spin_lock_init(&alc->lock); ++ } + return alc; + } + +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c +index c5b0cb4f4056..41f6e964fe91 100644 +--- a/net/sunrpc/svc.c ++++ b/net/sunrpc/svc.c +@@ -1062,6 +1062,8 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) + static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {} + #endif + ++extern void svc_tcp_prep_reply_hdr(struct svc_rqst *); ++ + /* + * Common routine for processing the RPC request. 
+ */ +@@ -1091,7 +1093,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) + clear_bit(RQ_DROPME, &rqstp->rq_flags); + + /* Setup reply header */ +- rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); ++ if (rqstp->rq_prot == IPPROTO_TCP) ++ svc_tcp_prep_reply_hdr(rqstp); + + svc_putu32(resv, rqstp->rq_xid); + +@@ -1138,7 +1141,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) + case SVC_DENIED: + goto err_bad_auth; + case SVC_CLOSE: +- if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) ++ if (rqstp->rq_xprt && ++ test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) + svc_close_xprt(rqstp->rq_xprt); + case SVC_DROP: + goto dropit; +@@ -1360,10 +1364,10 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, + dprintk("svc: %s(%p)\n", __func__, req); + + /* Build the svc_rqst used by the common processing routine */ +- rqstp->rq_xprt = serv->sv_bc_xprt; + rqstp->rq_xid = req->rq_xid; + rqstp->rq_prot = req->rq_xprt->prot; + rqstp->rq_server = serv; ++ rqstp->rq_bc_net = req->rq_xprt->xprt_net; + + rqstp->rq_addrlen = sizeof(req->rq_xprt->addr); + memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c +index 71f15da72f02..2b8e80c721db 100644 +--- a/net/sunrpc/svc_xprt.c ++++ b/net/sunrpc/svc_xprt.c +@@ -454,10 +454,11 @@ out: + */ + void svc_reserve(struct svc_rqst *rqstp, int space) + { ++ struct svc_xprt *xprt = rqstp->rq_xprt; ++ + space += rqstp->rq_res.head[0].iov_len; + +- if (space < rqstp->rq_reserved) { +- struct svc_xprt *xprt = rqstp->rq_xprt; ++ if (xprt && space < rqstp->rq_reserved) { + atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); + rqstp->rq_reserved = space; + +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c +index 9701fcca002c..0a9fe033132c 100644 +--- a/net/sunrpc/svcsock.c ++++ b/net/sunrpc/svcsock.c +@@ -1240,7 +1240,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) + /* + * Setup response header. TCP has a 4B record length field. 
+ */ +-static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp) ++void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp) + { + struct kvec *resv = &rqstp->rq_res.head[0]; + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 0467e5ba82e0..5d8ac2d798df 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -4792,6 +4792,13 @@ static void alc280_fixup_hp_9480m(struct hda_codec *codec, + } + } + ++static void alc_fixup_disable_mic_vref(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) ++ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); ++} ++ + /* for hda_fixup_thinkpad_acpi() */ + #include "thinkpad_helper.c" + +@@ -4891,6 +4898,7 @@ enum { + ALC293_FIXUP_LENOVO_SPK_NOISE, + ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, + ALC255_FIXUP_DELL_SPK_NOISE, ++ ALC225_FIXUP_DISABLE_MIC_VREF, + ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC295_FIXUP_DISABLE_DAC3, + ALC280_FIXUP_HP_HEADSET_MIC, +@@ -5546,6 +5554,12 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE + }, ++ [ALC225_FIXUP_DISABLE_MIC_VREF] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc_fixup_disable_mic_vref, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE ++ }, + [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { +@@ -5555,7 +5569,7 @@ static const struct hda_fixup alc269_fixups[] = { + {} + }, + .chained = true, +- .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE ++ .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF + }, + [ALC280_FIXUP_HP_HEADSET_MIC] = { + .type = HDA_FIXUP_FUNC,