Re: [PATCH 11/14] swiotlb: remove swiotlb_set_mem_attributes
On 3/19/2018 5:38 AM, Christoph Hellwig wrote: > Now that set_memory_decrypted is always available we can just call it > directly. > > Signed-off-by: Christoph Hellwig> Reviewed-by: Konrad Rzeszutek Wilk Reviewed-by: Tom Lendacky > --- > arch/x86/include/asm/mem_encrypt.h | 2 -- > arch/x86/mm/mem_encrypt.c | 9 - > lib/swiotlb.c | 12 ++-- > 3 files changed, 6 insertions(+), 17 deletions(-) > > diff --git a/arch/x86/include/asm/mem_encrypt.h > b/arch/x86/include/asm/mem_encrypt.h > index 22c5f3e6f820..9da0b63c8fc7 100644 > --- a/arch/x86/include/asm/mem_encrypt.h > +++ b/arch/x86/include/asm/mem_encrypt.h > @@ -48,8 +48,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, > unsigned long size); > /* Architecture __weak replacement functions */ > void __init mem_encrypt_init(void); > > -void swiotlb_set_mem_attributes(void *vaddr, unsigned long size); > - > bool sme_active(void); > bool sev_active(void); > > diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c > index 66beedc8fe3d..d3b80d5f9828 100644 > --- a/arch/x86/mm/mem_encrypt.c > +++ b/arch/x86/mm/mem_encrypt.c > @@ -446,15 +446,6 @@ void __init mem_encrypt_init(void) >: "Secure Memory Encryption (SME)"); > } > > -void swiotlb_set_mem_attributes(void *vaddr, unsigned long size) > -{ > - WARN(PAGE_ALIGN(size) != size, > - "size is not page-aligned (%#lx)\n", size); > - > - /* Make the SWIOTLB buffer area decrypted */ > - set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); > -} > - > struct sme_populate_pgd_data { > void*pgtable_area; > pgd_t *pgd; > diff --git a/lib/swiotlb.c b/lib/swiotlb.c > index c43ec2271469..005d1d87bb2e 100644 > --- a/lib/swiotlb.c > +++ b/lib/swiotlb.c > @@ -31,6 +31,7 @@ > #include > #include > #include > +#include > > #include > #include > @@ -156,8 +157,6 @@ unsigned long swiotlb_size_or_default(void) > return size ? 
size : (IO_TLB_DEFAULT_SIZE); > } > > -void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { } > - > /* For swiotlb, clear memory encryption mask from dma addresses */ > static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev, > phys_addr_t address) > @@ -209,12 +208,12 @@ void __init swiotlb_update_mem_attributes(void) > > vaddr = phys_to_virt(io_tlb_start); > bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); > - swiotlb_set_mem_attributes(vaddr, bytes); > + set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); > memset(vaddr, 0, bytes); > > vaddr = phys_to_virt(io_tlb_overflow_buffer); > bytes = PAGE_ALIGN(io_tlb_overflow); > - swiotlb_set_mem_attributes(vaddr, bytes); > + set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); > memset(vaddr, 0, bytes); > } > > @@ -355,7 +354,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long > nslabs) > io_tlb_start = virt_to_phys(tlb); > io_tlb_end = io_tlb_start + bytes; > > - swiotlb_set_mem_attributes(tlb, bytes); > + set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); > memset(tlb, 0, bytes); > > /* > @@ -366,7 +365,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long > nslabs) > if (!v_overflow_buffer) > goto cleanup2; > > - swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow); > + set_memory_decrypted((unsigned long)v_overflow_buffer, > + io_tlb_overflow >> PAGE_SHIFT); > memset(v_overflow_buffer, 0, io_tlb_overflow); > io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer); > > ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
[PATCH 11/14] swiotlb: remove swiotlb_set_mem_attributes
Now that set_memory_decrypted is always available we can just call it directly. Signed-off-by: Christoph HellwigReviewed-by: Konrad Rzeszutek Wilk --- arch/x86/include/asm/mem_encrypt.h | 2 -- arch/x86/mm/mem_encrypt.c | 9 - lib/swiotlb.c | 12 ++-- 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 22c5f3e6f820..9da0b63c8fc7 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -48,8 +48,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void); -void swiotlb_set_mem_attributes(void *vaddr, unsigned long size); - bool sme_active(void); bool sev_active(void); diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 66beedc8fe3d..d3b80d5f9828 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -446,15 +446,6 @@ void __init mem_encrypt_init(void) : "Secure Memory Encryption (SME)"); } -void swiotlb_set_mem_attributes(void *vaddr, unsigned long size) -{ - WARN(PAGE_ALIGN(size) != size, -"size is not page-aligned (%#lx)\n", size); - - /* Make the SWIOTLB buffer area decrypted */ - set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); -} - struct sme_populate_pgd_data { void*pgtable_area; pgd_t *pgd; diff --git a/lib/swiotlb.c b/lib/swiotlb.c index c43ec2271469..005d1d87bb2e 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -156,8 +157,6 @@ unsigned long swiotlb_size_or_default(void) return size ? 
size : (IO_TLB_DEFAULT_SIZE); } -void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { } - /* For swiotlb, clear memory encryption mask from dma addresses */ static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev, phys_addr_t address) @@ -209,12 +208,12 @@ void __init swiotlb_update_mem_attributes(void) vaddr = phys_to_virt(io_tlb_start); bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); - swiotlb_set_mem_attributes(vaddr, bytes); + set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); memset(vaddr, 0, bytes); vaddr = phys_to_virt(io_tlb_overflow_buffer); bytes = PAGE_ALIGN(io_tlb_overflow); - swiotlb_set_mem_attributes(vaddr, bytes); + set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); memset(vaddr, 0, bytes); } @@ -355,7 +354,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) io_tlb_start = virt_to_phys(tlb); io_tlb_end = io_tlb_start + bytes; - swiotlb_set_mem_attributes(tlb, bytes); + set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); memset(tlb, 0, bytes); /* @@ -366,7 +365,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) if (!v_overflow_buffer) goto cleanup2; - swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow); + set_memory_decrypted((unsigned long)v_overflow_buffer, + io_tlb_overflow >> PAGE_SHIFT); memset(v_overflow_buffer, 0, io_tlb_overflow); io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer); -- 2.14.2 ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [PATCH 11/14] swiotlb: remove swiotlb_set_mem_attributes
On Thu, Mar 15, 2018 at 01:51:59PM -0400, Konrad Rzeszutek Wilk wrote: > On Wed, Mar 14, 2018 at 06:52:10PM +0100, Christoph Hellwig wrote: > > Now that set_memory_decrypted is always available we can just call it > > directly. > > > > Won't this break ARM build? And of course right after asking that question I went back to the previous patch as I had that in my mind it was arch/x86/include, not the global one. So please ignore my query and instead: Reviewed-by: Konrad Rzeszutek Wilk

___
iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
Re: [PATCH 11/14] swiotlb: remove swiotlb_set_mem_attributes
On Wed, Mar 14, 2018 at 06:52:10PM +0100, Christoph Hellwig wrote: > Now that set_memory_decrypted is always available we can just call it > directly. > Won't this break ARM build? ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu
[PATCH 11/14] swiotlb: remove swiotlb_set_mem_attributes
Now that set_memory_decrypted is always available we can just call it directly. Signed-off-by: Christoph Hellwig--- arch/x86/include/asm/mem_encrypt.h | 2 -- arch/x86/mm/mem_encrypt.c | 9 - lib/swiotlb.c | 12 ++-- 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 22c5f3e6f820..9da0b63c8fc7 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -48,8 +48,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void); -void swiotlb_set_mem_attributes(void *vaddr, unsigned long size); - bool sme_active(void); bool sev_active(void); diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 66beedc8fe3d..d3b80d5f9828 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -446,15 +446,6 @@ void __init mem_encrypt_init(void) : "Secure Memory Encryption (SME)"); } -void swiotlb_set_mem_attributes(void *vaddr, unsigned long size) -{ - WARN(PAGE_ALIGN(size) != size, -"size is not page-aligned (%#lx)\n", size); - - /* Make the SWIOTLB buffer area decrypted */ - set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); -} - struct sme_populate_pgd_data { void*pgtable_area; pgd_t *pgd; diff --git a/lib/swiotlb.c b/lib/swiotlb.c index c43ec2271469..005d1d87bb2e 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -156,8 +157,6 @@ unsigned long swiotlb_size_or_default(void) return size ? 
size : (IO_TLB_DEFAULT_SIZE); } -void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { } - /* For swiotlb, clear memory encryption mask from dma addresses */ static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev, phys_addr_t address) @@ -209,12 +208,12 @@ void __init swiotlb_update_mem_attributes(void) vaddr = phys_to_virt(io_tlb_start); bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); - swiotlb_set_mem_attributes(vaddr, bytes); + set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); memset(vaddr, 0, bytes); vaddr = phys_to_virt(io_tlb_overflow_buffer); bytes = PAGE_ALIGN(io_tlb_overflow); - swiotlb_set_mem_attributes(vaddr, bytes); + set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); memset(vaddr, 0, bytes); } @@ -355,7 +354,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) io_tlb_start = virt_to_phys(tlb); io_tlb_end = io_tlb_start + bytes; - swiotlb_set_mem_attributes(tlb, bytes); + set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); memset(tlb, 0, bytes); /* @@ -366,7 +365,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) if (!v_overflow_buffer) goto cleanup2; - swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow); + set_memory_decrypted((unsigned long)v_overflow_buffer, + io_tlb_overflow >> PAGE_SHIFT); memset(v_overflow_buffer, 0, io_tlb_overflow); io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer); -- 2.14.2 ___ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu