author    Vittorio Giovara <vittorio.giovara@gmail.com>  2016-04-27 13:45:23 -0400
committer Diego Biurrun <diego@biurrun.de>               2016-05-04 18:16:21 +0200
commit    41ed7ab45fc693f7d7fc35664c0233f4c32d69bb (patch)
tree      146a086cf7c1881d55f9261b58138983e13af21c /libavcodec/x86
parent    5c31eaa9998b2185e0aa04d11adff128498dc14a (diff)
cosmetics: Fix spelling mistakes
Signed-off-by: Diego Biurrun <diego@biurrun.de>
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/cabac.h            |  2
-rw-r--r--  libavcodec/x86/h264_chromamc.asm  |  2
-rw-r--r--  libavcodec/x86/h264_i386.h        |  2
-rw-r--r--  libavcodec/x86/inline_asm.h       |  2
-rw-r--r--  libavcodec/x86/mpegvideo.c        |  4
-rw-r--r--  libavcodec/x86/rv40dsp.asm        | 10
-rw-r--r--  libavcodec/x86/vc1dsp_mmx.c       | 14
7 files changed, 18 insertions, 18 deletions
diff --git a/libavcodec/x86/cabac.h b/libavcodec/x86/cabac.h
index d1701bf071..40c29947eb 100644
--- a/libavcodec/x86/cabac.h
+++ b/libavcodec/x86/cabac.h
@@ -42,7 +42,7 @@
"sub "tmp" , "low" \n\t"
#else /* HAVE_FAST_CMOV */
#define BRANCHLESS_GET_CABAC_UPDATE(ret, retq, low, range, tmp) \
-/* P4 Prescott has crappy cmov,sbb,64bit shift so avoid them */ \
+/* P4 Prescott has crappy cmov,sbb,64-bit shift so avoid them */ \
"sub "low" , "tmp" \n\t"\
"sar $31 , "tmp" \n\t"\
"sub %%ecx , "range" \n\t"\
diff --git a/libavcodec/x86/h264_chromamc.asm b/libavcodec/x86/h264_chromamc.asm
index cc41f00461..1447940857 100644
--- a/libavcodec/x86/h264_chromamc.asm
+++ b/libavcodec/x86/h264_chromamc.asm
@@ -1,5 +1,5 @@
;******************************************************************************
-;* MMX/SSSE3-optimized functions for H264 chroma MC
+;* MMX/SSSE3-optimized functions for H.264 chroma MC
;* Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
;* 2005-2008 Loren Merritt
;*
diff --git a/libavcodec/x86/h264_i386.h b/libavcodec/x86/h264_i386.h
index bb881c35df..fb33e40784 100644
--- a/libavcodec/x86/h264_i386.h
+++ b/libavcodec/x86/h264_i386.h
@@ -21,7 +21,7 @@
/**
* @file
- * H.264 / AVC / MPEG4 part10 codec.
+ * H.264 / AVC / MPEG-4 part10 codec.
* non-MMX i386-specific optimizations for H.264
* @author Michael Niedermayer <michaelni@gmx.at>
*/
diff --git a/libavcodec/x86/inline_asm.h b/libavcodec/x86/inline_asm.h
index e4affabc87..fc554bfc8d 100644
--- a/libavcodec/x86/inline_asm.h
+++ b/libavcodec/x86/inline_asm.h
@@ -50,7 +50,7 @@
#endif
// using regr as temporary and for the output result
-// first argument is unmodifed and second is trashed
+// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
"movq "#rega", "#regr" \n\t" \
diff --git a/libavcodec/x86/mpegvideo.c b/libavcodec/x86/mpegvideo.c
index b739a8f9a8..33d5cd814d 100644
--- a/libavcodec/x86/mpegvideo.c
+++ b/libavcodec/x86/mpegvideo.c
@@ -1,6 +1,6 @@
/*
* Optimized for ia32 CPUs by Nick Kurshev <nickols_k@mail.ru>
- * h263, mpeg1, mpeg2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at>
+ * H.263, MPEG-1, MPEG-2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of Libav.
*
@@ -179,7 +179,7 @@ static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s,
block0 = block[0] * s->y_dc_scale;
else
block0 = block[0] * s->c_dc_scale;
- /* XXX: only mpeg1 */
+ /* XXX: only MPEG-1 */
quant_matrix = s->intra_matrix;
__asm__ volatile(
"pcmpeqw %%mm7, %%mm7 \n\t"
diff --git a/libavcodec/x86/rv40dsp.asm b/libavcodec/x86/rv40dsp.asm
index 0a242b54e3..77f6ddb25d 100644
--- a/libavcodec/x86/rv40dsp.asm
+++ b/libavcodec/x86/rv40dsp.asm
@@ -335,14 +335,14 @@ INIT_XMM ssse3
FILTER_SSSE3 put
FILTER_SSSE3 avg
-; %1=5bits weights?, %2=dst %3=src1 %4=src3 %5=stride if sse2
+; %1=5-bit weights?, %2=dst %3=src1 %4=src3 %5=stride if SSE2
%macro RV40_WCORE 4-5
movh m4, [%3 + r6 + 0]
movh m5, [%4 + r6 + 0]
%if %0 == 4
%define OFFSET r6 + mmsize / 2
%else
- ; 8x8 block and sse2, stride was provided
+ ; 8x8 block and SSE2, stride was provided
%define OFFSET r6
add r6, r5
%endif
@@ -350,7 +350,7 @@ FILTER_SSSE3 avg
movh m7, [%4 + OFFSET]
%if %1 == 0
- ; 14bits weights
+ ; 14-bit weights
punpcklbw m4, m0
punpcklbw m5, m0
punpcklbw m6, m0
@@ -368,7 +368,7 @@ FILTER_SSSE3 avg
paddw m4, m5
paddw m6, m7
%else
- ; 5bits weights
+ ; 5-bit weights
%if cpuflag(ssse3)
punpcklbw m4, m5
punpcklbw m6, m7
@@ -404,7 +404,7 @@ FILTER_SSSE3 avg
packuswb m4, m6
%if %0 == 5
- ; Only called for 8x8 blocks and sse2
+ ; Only called for 8x8 blocks and SSE2
sub r6, r5
movh [%2 + r6], m4
add r6, r5
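The 5-bit/14-bit split above decides whether the pixels can stay packed:
with weights that fit in 5 bits, an 8-bit pixel times a weight still fits
in 16 bits, so the SSSE3 path multiplies packed bytes directly, while the
wider 14-bit weights force the unpack-to-words path first. A scalar sketch
of the per-pixel weighted average (the shift and rounding constants here
are illustrative assumptions, not the exact values used by the asm):

    #include <stdint.h>

    static inline uint8_t rv40_weight_px(uint8_t s1, uint8_t s3,
                                         int w1, int w3)
    {
        int v = (s1 * w1 + s3 * w3 + 16) >> 5;   /* weighted avg + round */
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }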
diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c
index 046affbc26..95f5ee42de 100644
--- a/libavcodec/x86/vc1dsp_mmx.c
+++ b/libavcodec/x86/vc1dsp_mmx.c
@@ -81,7 +81,7 @@
"movq %%mm"#R1", "#OFF"(%1) \n\t" \
"add %2, %0 \n\t"
-/** Sacrifying mm6 allows to pipeline loads from src */
+/** Sacrificing mm6 allows to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
const uint8_t *src, x86_reg stride,
int rnd, int64_t shift)
@@ -165,7 +165,7 @@ VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
/**
* Purely vertical or horizontal 1/2 shift interpolation.
- * Sacrify mm6 for *9 factor.
+ * Sacrifice mm6 for *9 factor.
*/
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
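The "*9 factor" comes from VC-1's half-shift bicubic taps (-1, 9, 9, -1);
in MMX the *9 is cheapest as a shift-and-add (x*8 + x), which is what costs
the spare mm6 register. One output pixel in scalar form (the rounding
layout is an assumption, not lifted from the asm):

    /* Half-pel bicubic tap: (-a + 9*(b + c) - d + 8 - rnd) >> 4 */
    static inline int vc1_shift2_px(int a, int b, int c, int d, int rnd)
    {
        int m = b + c;
        m = (m << 3) + m;              /* *9 without a multiply: x*8 + x */
        return (m - a - d + 8 - rnd) >> 4;
    }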
@@ -223,7 +223,7 @@ VC1_SHIFT2(OP_AVG, avg_)
/**
* Core of the 1/4 and 3/4 shift bicubic interpolation.
*
- * @param UNPACK Macro unpacking arguments from 8 to 16bits (can be empty).
+ * @param UNPACK Macro unpacking arguments from 8 to 16 bits (can be empty).
* @param MOVQ "movd 1" or "movq 2", if data read is already unpacked.
* @param A1 Address of 1st tap (beware of unpacked/packed).
* @param A2 Address of 2nd tap
@@ -263,7 +263,7 @@ VC1_SHIFT2(OP_AVG, avg_)
"paddw %%mm2, %%mm4 \n\t" /* 4,53,18,-3 */
/**
- * Macro to build the vertical 16bits version of vc1_put_shift[13].
+ * Macro to build the vertical 16 bits version of vc1_put_shift[13].
* Here, offset=src_stride. Parameters passed A1 to A4 must use
* %3 (src_stride) and %4 (3*src_stride).
*
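For orientation, the tap set noted in the "4,53,18,-3" comment above is
VC-1's quarter-shift bicubic filter; one output pixel in scalar form (the
+32 rounding term and >> 6 normalization are stated as assumptions):

    /* Quarter-pel bicubic tap over four neighbouring pixels */
    static inline int vc1_shift1_px(int a, int b, int c, int d, int rnd)
    {
        return (-4 * a + 53 * b + 18 * c - 3 * d + 32 - rnd) >> 6;
    }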
@@ -320,8 +320,8 @@ vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src, \
}
/**
- * Macro to build the horizontal 16bits version of vc1_put_shift[13].
- * Here, offset=16bits, so parameters passed A1 to A4 should be simple.
+ * Macro to build the horizontal 16 bits version of vc1_put_shift[13].
+ * Here, offset=16 bits, so parameters passed A1 to A4 should be simple.
*
* @param NAME Either 1 or 3
* @see MSPEL_FILTER13_CORE for information on A1->A4
@@ -357,7 +357,7 @@ OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride, \
}
/**
- * Macro to build the 8bits, any direction, version of vc1_put_shift[13].
+ * Macro to build the 8 bits, any direction, version of vc1_put_shift[13].
* Here, offset=src_stride. Parameters passed A1 to A4 must use
* %3 (offset) and %4 (3*offset).
*