crypto: x86 - make constants readonly, allow linker to merge them
A lot of asm-optimized routines in arch/x86/crypto/ keep their
constants in .data. This is wrong; they should be in .rodata.

Many of these constants are the same in different modules.
For example, 128-bit shuffle mask 0x000102030405060708090A0B0C0D0E0F
exists in at least half a dozen places.

There is a way to let the linker merge them and keep just one copy.
The rule is as follows: mergeable objects of different sizes
must not share sections. You can't put them all in one .rodata
section, or they will lose their "mergeability".

GCC puts its mergeable constants in ".rodata.cstSIZE" sections,
or ".rodata.cstSIZE.<object_name>" if -fdata-sections is used.
This patch does the same:

	.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16

It is important that all data in such a section consists of
16-byte elements, not larger ones, and that there is no implicit
use of one element from another.

When this is not the case, use a non-mergeable section:

	.section .rodata[.VAR_NAME], "a", @progbits
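
For example (a sketch of the pattern used in aesni-intel_asm.S below,
not verbatim code), the partial-block handling reads 16 bytes starting
inside SHIFT_MASK and another 16 bytes at a fixed offset from it, so
SHIFT_MASK, ALL_F and the trailing all-zero block must stay adjacent,
in that order, in a non-mergeable section:

	# %r13 = number of valid bytes in the partial block
	lea	SHIFT_MASK+16(%rip), %r12
	sub	%r13, %r12			# point back inside SHIFT_MASK
	movdqu	(%r12), %xmm2			# read straddles SHIFT_MASK and ALL_F
	movdqu	ALL_F-SHIFT_MASK(%r12), %xmm1	# read straddles ALL_F and the zero block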

This reduces .data by ~15 kbytes:

    text    data     bss     dec      hex filename
11097415 2705840 2630712 16433967  fac32f vmlinux-prev.o
11112095 2690672 2630712 16433479  fac147 vmlinux.o

Merged objects are visible in System.map:

ffffffff81a28810 r POLY
ffffffff81a28810 r POLY
ffffffff81a28820 r TWOONE
ffffffff81a28820 r TWOONE
ffffffff81a28830 r PSHUFFLE_BYTE_FLIP_MASK <- merged regardless of
ffffffff81a28830 r SHUF_MASK   <------------- the name difference
ffffffff81a28830 r SHUF_MASK
ffffffff81a28830 r SHUF_MASK
..
ffffffff81a28d00 r K512 <- merged three identical 640-byte tables
ffffffff81a28d00 r K512
ffffffff81a28d00 r K512
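
As an illustration of the mechanism (a sketch built from two constants
in this patch, not additional code): SHUF_MASK in aesni-intel_asm.S and
.Lbswap128_mask in cast5-avx-x86_64-asm_64.S emit the same 16 bytes, so
once both are placed in "aM" sections with a 16-byte entity size the
linker is free to keep a single copy, exactly as the System.map excerpt
above shows:

	.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16
	.align 16
	SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F

	.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
	.align 16
	.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0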

Use of object names in section name suffixes is not strictly necessary,
but it might help if the link stage someday uses garbage collection
to eliminate unused sections (ld --gc-sections).

Signed-off-by: Denys Vlasenko <[email protected]>
CC: Herbert Xu <[email protected]>
CC: Josh Poimboeuf <[email protected]>
CC: Xiaodong Liu <[email protected]>
CC: Megha Dey <[email protected]>
CC: [email protected]
CC: [email protected]
CC: [email protected]
Signed-off-by: Herbert Xu <[email protected]>
dvlasenk authored and herbertx committed Jan 23, 2017
1 parent 587d531 commit e183914
Showing 33 changed files with 229 additions and 74 deletions.
37 changes: 29 additions & 8 deletions arch/x86/crypto/aesni-intel_asm.S
@@ -46,28 +46,49 @@

#ifdef __x86_64__

.data
# constants in mergeable sections, linker can reorder and merge
.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16
.align 16
.Lgf128mul_x_ble_mask:
.octa 0x00000000000000010000000000000087
.section .rodata.cst16.POLY, "aM", @progbits, 16
.align 16
POLY: .octa 0xC2000000000000000000000000000001
.section .rodata.cst16.TWOONE, "aM", @progbits, 16
.align 16
TWOONE: .octa 0x00000001000000000000000000000001

# order of these constants should not change.
# more specifically, ALL_F should follow SHIFT_MASK,
# and ZERO should follow ALL_F

.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16
.align 16
SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F
.section .rodata.cst16.MASK1, "aM", @progbits, 16
.align 16
MASK1: .octa 0x0000000000000000ffffffffffffffff
.section .rodata.cst16.MASK2, "aM", @progbits, 16
.align 16
MASK2: .octa 0xffffffffffffffff0000000000000000
SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
ZERO: .octa 0x00000000000000000000000000000000
.section .rodata.cst16.ONE, "aM", @progbits, 16
.align 16
ONE: .octa 0x00000000000000000000000000000001
.section .rodata.cst16.F_MIN_MASK, "aM", @progbits, 16
.align 16
F_MIN_MASK: .octa 0xf1f2f3f4f5f6f7f8f9fafbfcfdfeff0
.section .rodata.cst16.dec, "aM", @progbits, 16
.align 16
dec: .octa 0x1
.section .rodata.cst16.enc, "aM", @progbits, 16
.align 16
enc: .octa 0x2

# order of these constants should not change.
# more specifically, ALL_F should follow SHIFT_MASK,
# and zero should follow ALL_F
.section .rodata, "a", @progbits
.align 16
SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000


.text

32 changes: 24 additions & 8 deletions arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -122,23 +122,39 @@
#include <linux/linkage.h>
#include <asm/inst.h>

.data
# constants in mergeable sections, linker can reorder and merge
.section .rodata.cst16.POLY, "aM", @progbits, 16
.align 16

POLY: .octa 0xC2000000000000000000000000000001

.section .rodata.cst16.POLY2, "aM", @progbits, 16
.align 16
POLY2: .octa 0xC20000000000000000000001C2000000
TWOONE: .octa 0x00000001000000000000000000000001

# order of these constants should not change.
# more specifically, ALL_F should follow SHIFT_MASK, and ZERO should follow ALL_F
.section .rodata.cst16.TWOONE, "aM", @progbits, 16
.align 16
TWOONE: .octa 0x00000001000000000000000000000001

.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16
.align 16
SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F
SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
ZERO: .octa 0x00000000000000000000000000000000

.section .rodata.cst16.ONE, "aM", @progbits, 16
.align 16
ONE: .octa 0x00000000000000000000000000000001

.section .rodata.cst16.ONEf, "aM", @progbits, 16
.align 16
ONEf: .octa 0x01000000000000000000000000000000

# order of these constants should not change.
# more specifically, ALL_F should follow SHIFT_MASK, and zero should follow ALL_F
.section .rodata, "a", @progbits
.align 16
SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000

.text


5 changes: 4 additions & 1 deletion arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -571,7 +571,9 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
vmovdqu y6, 14 * 16(rio); \
vmovdqu y7, 15 * 16(rio);

.data

/* NB: section is mergeable, all elements must be aligned 16-byte blocks */
.section .rodata.cst16, "aM", @progbits, 16
.align 16

#define SHUFB_BYTES(idx) \
@@ -711,6 +713,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03

/* 4-bit mask */
.section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
.align 4
.L0f0f0f0f:
.long 0x0f0f0f0f
12 changes: 9 additions & 3 deletions arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -610,20 +610,25 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
vmovdqu y6, 14 * 32(rio); \
vmovdqu y7, 15 * 32(rio);

.data
.align 32

.section .rodata.cst32.shufb_16x16b, "aM", @progbits, 32
.align 32
#define SHUFB_BYTES(idx) \
0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx)

.Lshufb_16x16b:
.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)
.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)

.section .rodata.cst32.pack_bswap, "aM", @progbits, 32
.align 32
.Lpack_bswap:
.long 0x00010203, 0x04050607, 0x80808080, 0x80808080
.long 0x00010203, 0x04050607, 0x80808080, 0x80808080

/* NB: section is mergeable, all elements must be aligned 16-byte blocks */
.section .rodata.cst16, "aM", @progbits, 16
.align 16

/* For CTR-mode IV byteswap */
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
@@ -750,6 +755,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03

.section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
.align 4
/* 4-bit mask */
.L0f0f0f0f:
14 changes: 12 additions & 2 deletions arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -195,19 +195,29 @@
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1;

.data

.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.section .rodata.cst16.bswap_iv_mask, "aM", @progbits, 16
.align 16
.Lbswap_iv_mask:
.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0

.section .rodata.cst4.16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
.byte 16, 16, 16, 16
.section .rodata.cst4.32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
.byte 32, 0, 0, 0
.section .rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
.byte 0x1f, 0, 0, 0

12 changes: 10 additions & 2 deletions arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -225,8 +225,7 @@
vpshufb rmask, x2, x2; \
vpshufb rmask, x3, x3;

.data

.section .rodata.cst16, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
@@ -244,10 +243,19 @@
.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section .rodata.cst4.L16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
.byte 16, 16, 16, 16

.section .rodata.cst4.L32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
.byte 32, 0, 0, 0

.section .rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
.byte 0x1f, 0, 0, 0

9 changes: 7 additions & 2 deletions arch/x86/crypto/chacha20-avx2-x86_64.S
@@ -11,13 +11,18 @@

#include <linux/linkage.h>

.data
.section .rodata.cst32.ROT8, "aM", @progbits, 32
.align 32

ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003
.octa 0x0e0d0c0f0a09080b0605040702010003

.section .rodata.cst32.ROT16, "aM", @progbits, 32
.align 32
ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
.octa 0x0d0c0f0e09080b0a0504070601000302

.section .rodata.cst32.CTRINC, "aM", @progbits, 32
.align 32
CTRINC: .octa 0x00000003000000020000000100000000
.octa 0x00000007000000060000000500000004

7 changes: 5 additions & 2 deletions arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -11,11 +11,14 @@

#include <linux/linkage.h>

.data
.section .rodata.cst16.ROT8, "aM", @progbits, 16
.align 16

ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003
.section .rodata.cst16.ROT16, "aM", @progbits, 16
.align 16
ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
.section .rodata.cst16.CTRINC, "aM", @progbits, 16
.align 16
CTRINC: .octa 0x00000003000000020000000100000000

.text
14 changes: 11 additions & 3 deletions arch/x86/crypto/crct10dif-pcl-asm_64.S
@@ -554,12 +554,11 @@ _only_less_than_2:

ENDPROC(crc_t10dif_pcl)

.data

.section .rodata, "a", @progbits
.align 16
# precomputed constants
# these constants are precomputed from the poly:
# 0x8bb70000 (0x8bb7 scaled to 32 bits)
.align 16
# Q = 0x18BB70000
# rk1 = 2^(32*3) mod Q << 32
# rk2 = 2^(32*5) mod Q << 32
@@ -613,14 +612,23 @@ rk20:



.section .rodata.cst16.mask1, "aM", @progbits, 16
.align 16
mask1:
.octa 0x80808080808080808080808080808080

.section .rodata.cst16.mask2, "aM", @progbits, 16
.align 16
mask2:
.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF

.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16
.align 16
SHUF_MASK:
.octa 0x000102030405060708090A0B0C0D0E0F

.section .rodata.cst32.pshufb_shf_table, "aM", @progbits, 32
.align 32
pshufb_shf_table:
# use these values for shift constants for the pshufb instruction
# different alignments result in values as shown:
2 changes: 1 addition & 1 deletion arch/x86/crypto/des3_ede-asm_64.S
@@ -537,7 +537,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way)
ret;
ENDPROC(des3_ede_x86_64_crypt_blk_3way)

.data
.section .rodata, "a", @progbits
.align 16
.L_s1:
.quad 0x0010100001010400, 0x0000000000000000
3 changes: 1 addition & 2 deletions arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -20,8 +20,7 @@
#include <asm/inst.h>
#include <asm/frame.h>

.data

.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
.octa 0x000102030405060708090a0b0c0d0e0f
6 changes: 4 additions & 2 deletions arch/x86/crypto/poly1305-avx2-x86_64.S
@@ -11,11 +11,13 @@

#include <linux/linkage.h>

.data
.section .rodata.cst32.ANMASK, "aM", @progbits, 32
.align 32

ANMASK: .octa 0x0000000003ffffff0000000003ffffff
.octa 0x0000000003ffffff0000000003ffffff

.section .rodata.cst32.ORMASK, "aM", @progbits, 32
.align 32
ORMASK: .octa 0x00000000010000000000000001000000
.octa 0x00000000010000000000000001000000

6 changes: 4 additions & 2 deletions arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -11,10 +11,12 @@

#include <linux/linkage.h>

.data
.section .rodata.cst16.ANMASK, "aM", @progbits, 16
.align 16

ANMASK: .octa 0x0000000003ffffff0000000003ffffff

.section .rodata.cst16.ORMASK, "aM", @progbits, 16
.align 16
ORMASK: .octa 0x00000000010000000000000001000000

.text
5 changes: 3 additions & 2 deletions arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -29,11 +29,12 @@

.file "serpent-avx-x86_64-asm_64.S"

.data
.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16

.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0

9 changes: 7 additions & 2 deletions arch/x86/crypto/serpent-avx2-asm_64.S
@@ -20,13 +20,18 @@

.file "serpent-avx2-asm_64.S"

.data
.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16

.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section .rodata.cst16.xts_gf128mul_and_shl1_mask_0, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask_0:
.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0

.section .rodata.cst16.xts_gf128mul_and_shl1_mask_1, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask_1:
.byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
