[Intel-gfx] [PATCH 3/3] [RFC] Revert "drm/i915: use variadic macros and arrays to choose port/pipe based registers"
Arnd Bergmann
arnd at arndb.de
Mon Mar 20 10:39:17 UTC 2017
On Mon, Mar 20, 2017 at 11:08 AM, Jani Nikula
<jani.nikula at linux.intel.com> wrote:
> On Mon, 20 Mar 2017, Arnd Bergmann <arnd at arndb.de> wrote:
>> The varargs macro trick in _PIPE3/_PHY3/_PORT3 was meant as an optimization
>> to shrink the i915 kernel module by around 1000 bytes.
>
> To be clear, it was not at all intended to be an optimization, nothing
> of the sort. The intention was to make it easier and less error-prone
> to add more parameters to said macros. The text size shrink was just a
> bonus.
>
>> However, the
>> downside is a size regression with CONFIG_KASAN, as I found from stack size
>> warnings with gcc-7.0.1:
>
> In his review of the original change, Chris provided this comparison
> https://godbolt.org/g/YCK1od
>
> How does CONFIG_KASAN change this? Would be nice to see how the
> generated code blows up.
>
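For context, the construct we are talking about is the compound-literal
array lookup behind _PIPE3()/_PHY3()/_PORT3(), roughly along these lines
(a sketch from memory, not the exact upstream definitions):

  /* pick the a/b/c register offset for the given pipe;
   * the anonymous array is built on the stack at each use */
  #define _PIPE3(pipe, a, b, c) (((const u32 []){ a, b, c })[(pipe)])

With KASAN, every store that initializes that on-stack array and the
final indexed load get instrumented, which is what the long series of
checks below boils down to.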
I don't know how to generate a URL for it, but after adding this to the
command line for gcc-7,
-fsanitize=kernel-address -fasan-shadow-offset=0xdfff900000000000
--param asan-stack=1 --param asan-globals=1
--param asan-instrumentation-with-call-threshold=10000
-fsanitize-address-use-after-scope
the code turned from really nice into the long series of checks below.
Without -fsanitize-address-use-after-scope (which didn't exist before gcc-7),
it's less bad but still exceeds the (arbitrary) 1536-byte stack frame
warning limit.
Arnd
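The listing below comes from a reduced standalone test case along these
lines (a hypothetical reconstruction of what the macro expands to, not
the i915 source itself), compiled with the options above:

  int main(void)
  {
          int i = 1;
          /* stand-in for _PIPE3(): anonymous array built on the stack,
           * then indexed with a runtime value */
          return ((const int []){ 0, 1, 2, 3, 4, 5 })[i];
  }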
.LC0:
.string "2 32 4 1 i 96 24 9 <unknown> "
main:
.LASANPC0:
pushq %r12
pushq %rbp
movabsq $-2305966154516004864, %rdx
pushq %rbx
subq $160, %rsp
movq %rsp, %rbp
leaq 96(%rsp), %rbx
movq $1102416563, (%rsp)
shrq $3, %rbp
movq $.LC0, 8(%rsp)
movq $.LASANPC0, 16(%rsp)
leaq 0(%rbp,%rdx), %rax
movl $-235802127, (%rax)
movl $-218959356, 4(%rax)
movl $-218959118, 8(%rax)
movl $-234881024, 12(%rax)
movl $-202116109, 16(%rax)
movq %rbx, %rax
movl $1, 32(%rsp)
shrq $3, %rax
movzbl (%rax,%rdx), %eax
testb %al, %al
je .L2
cmpb $3, %al
jle .L53
.L2:
leaq 4(%rbx), %rdi
movabsq $-2305966154516004864, %rax
movl $0, 96(%rsp)
movq %rdi, %rdx
shrq $3, %rdx
movzbl (%rdx,%rax), %edx
movq %rdi, %rax
andl $7, %eax
addl $3, %eax
cmpb %dl, %al
jl .L3
testb %dl, %dl
jne .L54
.L3:
leaq 8(%rbx), %rdi
movabsq $-2305966154516004864, %rax
movl $1, 100(%rsp)
movq %rdi, %rdx
shrq $3, %rdx
movzbl (%rdx,%rax), %eax
testb %al, %al
je .L4
cmpb $3, %al
jle .L55
.L4:
leaq 12(%rbx), %rdi
movabsq $-2305966154516004864, %rax
movl $2, 104(%rsp)
movq %rdi, %rdx
shrq $3, %rdx
movzbl (%rdx,%rax), %edx
movq %rdi, %rax
andl $7, %eax
addl $3, %eax
cmpb %dl, %al
jl .L5
testb %dl, %dl
jne .L56
.L5:
leaq 16(%rbx), %rdi
movabsq $-2305966154516004864, %rax
movl $3, 108(%rsp)
movq %rdi, %rdx
shrq $3, %rdx
movzbl (%rdx,%rax), %eax
testb %al, %al
je .L6
cmpb $3, %al
jle .L57
.L6:
leaq 20(%rbx), %rdi
movabsq $-2305966154516004864, %rax
movl $4, 112(%rsp)
movq %rdi, %rdx
shrq $3, %rdx
movzbl (%rdx,%rax), %edx
movq %rdi, %rax
andl $7, %eax
addl $3, %eax
cmpb %dl, %al
jl .L7
testb %dl, %dl
jne .L58
.L7:
movslq 32(%rsp), %r12
movabsq $-2305966154516004864, %rax
movl $5, 116(%rsp)
leaq (%rbx,%r12,4), %rdi
movq %rdi, %rdx
shrq $3, %rdx
movzbl (%rdx,%rax), %edx
movq %rdi, %rax
andl $7, %eax
addl $3, %eax
cmpb %dl, %al
jl .L8
testb %dl, %dl
jne .L59
.L8:
pxor %xmm0, %xmm0
movabsq $-2305966154516004864, %rdx
movl 96(%rsp,%r12,4), %eax
movl $0, 16(%rdx,%rbp)
movups %xmm0, 0(%rbp,%rdx)
addq $160, %rsp
popq %rbx
popq %rbp
popq %r12
ret
.L59:
call __asan_report_load4_noabort
jmp .L8
.L58:
call __asan_report_store4_noabort
jmp .L7
.L57:
call __asan_report_store4_noabort
jmp .L6
.L56:
call __asan_report_store4_noabort
jmp .L5
.L55:
call __asan_report_store4_noabort
jmp .L4
.L54:
call __asan_report_store4_noabort
jmp .L3
.L53:
movq %rbx, %rdi
call __asan_report_store4_noabort
jmp .L2