Skip to content

Commit 29b5eb5

Browse files
James Hogan authored and gregkh committed
metag/usercopy: Zero rest of buffer from copy_from_user
commit 563ddc1076109f2b3f88e6d355eab7b6fd4662cb upstream.

Currently we try to zero the destination for a failed read from userland in fixup code in the usercopy.c macros. The rest of the destination buffer is then zeroed from __copy_user_zeroing(), which is used for both copy_from_user() and __copy_from_user(). Unfortunately we fail to zero in the fixup code as D1Ar1 is set to 0 before the fixup code entry labels, and __copy_from_user() shouldn't even be zeroing the rest of the buffer.

Move the zeroing out into copy_from_user() and rename __copy_user_zeroing() to raw_copy_from_user() since it no longer does any zeroing. This also conveniently matches the name needed for RAW_COPY_USER support in a later patch.

Fixes: 373cd78 ("metag: Memory handling")
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent dde6f22 commit 29b5eb5

2 files changed

Lines changed: 26 additions & 46 deletions

File tree

arch/metag/include/asm/uaccess.h

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
197197

198198
#define strlen_user(str) strnlen_user(str, 32767)
199199

200-
extern unsigned long __must_check __copy_user_zeroing(void *to,
201-
const void __user *from,
202-
unsigned long n);
200+
extern unsigned long raw_copy_from_user(void *to, const void __user *from,
201+
unsigned long n);
203202

204203
static inline unsigned long
205204
copy_from_user(void *to, const void __user *from, unsigned long n)
206205
{
206+
unsigned long res = n;
207207
if (likely(access_ok(VERIFY_READ, from, n)))
208-
return __copy_user_zeroing(to, from, n);
209-
memset(to, 0, n);
210-
return n;
208+
res = raw_copy_from_user(to, from, n);
209+
if (unlikely(res))
210+
memset(to + (n - res), 0, res);
211+
return res;
211212
}
212213

213-
#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
214+
#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
214215
#define __copy_from_user_inatomic __copy_from_user
215216

216217
extern unsigned long __must_check __copy_user(void __user *to,

arch/metag/lib/usercopy.c

Lines changed: 18 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@
2929
COPY \
3030
"1:\n" \
3131
" .section .fixup,\"ax\"\n" \
32-
" MOV D1Ar1,#0\n" \
3332
FIXUP \
3433
" MOVT D1Ar1,#HI(1b)\n" \
3534
" JUMP D1Ar1,#LO(1b)\n" \
@@ -637,16 +636,14 @@ EXPORT_SYMBOL(__copy_user);
637636
__asm_copy_user_cont(to, from, ret, \
638637
" GETB D1Ar1,[%1++]\n" \
639638
"2: SETB [%0++],D1Ar1\n", \
640-
"3: ADD %2,%2,#1\n" \
641-
" SETB [%0++],D1Ar1\n", \
639+
"3: ADD %2,%2,#1\n", \
642640
" .long 2b,3b\n")
643641

644642
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
645643
__asm_copy_user_cont(to, from, ret, \
646644
" GETW D1Ar1,[%1++]\n" \
647645
"2: SETW [%0++],D1Ar1\n" COPY, \
648-
"3: ADD %2,%2,#2\n" \
649-
" SETW [%0++],D1Ar1\n" FIXUP, \
646+
"3: ADD %2,%2,#2\n" FIXUP, \
650647
" .long 2b,3b\n" TENTRY)
651648

652649
#define __asm_copy_from_user_2(to, from, ret) \
@@ -656,32 +653,26 @@ EXPORT_SYMBOL(__copy_user);
656653
__asm_copy_from_user_2x_cont(to, from, ret, \
657654
" GETB D1Ar1,[%1++]\n" \
658655
"4: SETB [%0++],D1Ar1\n", \
659-
"5: ADD %2,%2,#1\n" \
660-
" SETB [%0++],D1Ar1\n", \
656+
"5: ADD %2,%2,#1\n", \
661657
" .long 4b,5b\n")
662658

663659
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
664660
__asm_copy_user_cont(to, from, ret, \
665661
" GETD D1Ar1,[%1++]\n" \
666662
"2: SETD [%0++],D1Ar1\n" COPY, \
667-
"3: ADD %2,%2,#4\n" \
668-
" SETD [%0++],D1Ar1\n" FIXUP, \
663+
"3: ADD %2,%2,#4\n" FIXUP, \
669664
" .long 2b,3b\n" TENTRY)
670665

671666
#define __asm_copy_from_user_4(to, from, ret) \
672667
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
673668

674-
675669
#define __asm_copy_from_user_8x64(to, from, ret) \
676670
asm volatile ( \
677671
" GETL D0Ar2,D1Ar1,[%1++]\n" \
678672
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
679673
"1:\n" \
680674
" .section .fixup,\"ax\"\n" \
681-
" MOV D1Ar1,#0\n" \
682-
" MOV D0Ar2,#0\n" \
683675
"3: ADD %2,%2,#8\n" \
684-
" SETL [%0++],D0Ar2,D1Ar1\n" \
685676
" MOVT D0Ar2,#HI(1b)\n" \
686677
" JUMP D0Ar2,#LO(1b)\n" \
687678
" .previous\n" \
@@ -721,11 +712,12 @@ EXPORT_SYMBOL(__copy_user);
721712
"SUB %1, %1, #4\n")
722713

723714

724-
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
725-
userland. The return-value is the number of bytes that were
726-
inaccessible. */
727-
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
728-
unsigned long n)
715+
/*
716+
* Copy from user to kernel. The return-value is the number of bytes that were
717+
* inaccessible.
718+
*/
719+
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
720+
unsigned long n)
729721
{
730722
register char *dst asm ("A0.2") = pdst;
731723
register const char __user *src asm ("A1.2") = psrc;
@@ -738,30 +730,30 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
738730
__asm_copy_from_user_1(dst, src, retn);
739731
n--;
740732
if (retn)
741-
goto copy_exception_bytes;
733+
return retn + n;
742734
}
743735
if ((unsigned long) dst & 1) {
744736
/* Worst case - byte copy */
745737
while (n > 0) {
746738
__asm_copy_from_user_1(dst, src, retn);
747739
n--;
748740
if (retn)
749-
goto copy_exception_bytes;
741+
return retn + n;
750742
}
751743
}
752744
if (((unsigned long) src & 2) && n >= 2) {
753745
__asm_copy_from_user_2(dst, src, retn);
754746
n -= 2;
755747
if (retn)
756-
goto copy_exception_bytes;
748+
return retn + n;
757749
}
758750
if ((unsigned long) dst & 2) {
759751
/* Second worst case - word copy */
760752
while (n >= 2) {
761753
__asm_copy_from_user_2(dst, src, retn);
762754
n -= 2;
763755
if (retn)
764-
goto copy_exception_bytes;
756+
return retn + n;
765757
}
766758
}
767759

@@ -777,7 +769,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
777769
__asm_copy_from_user_8x64(dst, src, retn);
778770
n -= 8;
779771
if (retn)
780-
goto copy_exception_bytes;
772+
return retn + n;
781773
}
782774
}
783775

@@ -793,7 +785,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
793785
__asm_copy_from_user_8x64(dst, src, retn);
794786
n -= 8;
795787
if (retn)
796-
goto copy_exception_bytes;
788+
return retn + n;
797789
}
798790
}
799791
#endif
@@ -803,7 +795,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
803795
n -= 4;
804796

805797
if (retn)
806-
goto copy_exception_bytes;
798+
return retn + n;
807799
}
808800

809801
/* If we get here, there were no memory read faults. */
@@ -829,21 +821,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
829821
/* If we get here, retn correctly reflects the number of failing
830822
bytes. */
831823
return retn;
832-
833-
copy_exception_bytes:
834-
/* We already have "retn" bytes cleared, and need to clear the
835-
remaining "n" bytes. A non-optimized simple byte-for-byte in-line
836-
memset is preferred here, since this isn't speed-critical code and
837-
we'd rather have this a leaf-function than calling memset. */
838-
{
839-
char *endp;
840-
for (endp = dst + n; dst < endp; dst++)
841-
*dst = 0;
842-
}
843-
844-
return retn + n;
845824
}
846-
EXPORT_SYMBOL(__copy_user_zeroing);
825+
EXPORT_SYMBOL(raw_copy_from_user);
847826

848827
#define __asm_clear_8x64(to, ret) \
849828
asm volatile ( \

0 commit comments

Comments (0)