Mercurial > hg > pmhd
annotate ffmpeg/libavcodec/alpha/hpeldsp_alpha_asm.S @ 13:844d341cf643 tip
Back up before ISMIR
author | Yading Song <yading.song@eecs.qmul.ac.uk> |
---|---|
date | Thu, 31 Oct 2013 13:17:06 +0000 |
parents | 6840f77b83aa |
children |
rev | line source |
---|---|
yading@10 | 1 /* |
yading@10 | 2 * Alpha optimized DSP utils |
yading@10 | 3 * Copyright (c) 2002 Falk Hueffner <falk@debian.org> |
yading@10 | 4 * |
yading@10 | 5 * This file is part of FFmpeg. |
yading@10 | 6 * |
yading@10 | 7 * FFmpeg is free software; you can redistribute it and/or |
yading@10 | 8 * modify it under the terms of the GNU Lesser General Public |
yading@10 | 9 * License as published by the Free Software Foundation; either |
yading@10 | 10 * version 2.1 of the License, or (at your option) any later version. |
yading@10 | 11 * |
yading@10 | 12 * FFmpeg is distributed in the hope that it will be useful, |
yading@10 | 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of |
yading@10 | 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
yading@10 | 15 * Lesser General Public License for more details. |
yading@10 | 16 * |
yading@10 | 17 * You should have received a copy of the GNU Lesser General Public |
yading@10 | 18 * License along with FFmpeg; if not, write to the Free Software |
yading@10 | 19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
yading@10 | 20 */ |
yading@10 | 21 |
yading@10 | 22 /* |
yading@10 | 23 * These functions are scheduled for pca56. They should work |
yading@10 | 24 * reasonably on ev6, though. |
yading@10 | 25 */ |
yading@10 | 26 |
yading@10 | 27 #include "regdef.h" /* project header: symbolic register names (a0-a3, t0-..., ra, sp) -- presumably the FFmpeg Alpha regdef; verify */ |
yading@10 | 28 |
yading@10 | 29 |
yading@10 | 30 .set noat /* forbid the assembler from using $at in macro expansions */ |
yading@10 | 31 .set noreorder /* keep the hand-tuned instruction schedule: no assembler reordering */ |
yading@10 | 32 .arch pca56 /* allow PCA56 (21164PC) instructions, per the scheduling note above */ |
yading@10 | 33 .text |
yading@10 | 34 |
yading@10 | 35 /************************************************************************ |
yading@10 | 36 * void put_pixels_axp_asm(uint8_t *block, const uint8_t *pixels, |
yading@10 | 37 * int line_size, int h) |
yading@10 | 38 */ |
yading@10 | 39 .align 6 |
yading@10 | 40 .globl put_pixels_axp_asm |
yading@10 | 41 .ent put_pixels_axp_asm |
yading@10 | 42 put_pixels_axp_asm: /* a0 = block (dst), a1 = pixels (src), a2 = line_size, a3 = h */ |
yading@10 | 43 .frame sp, 0, ra /* leaf routine: no stack frame, return address stays in ra */ |
yading@10 | 44 .prologue 0 |
yading@10 | 45 |
yading@10 | 46 and a1, 7, t0 /* t0 = pixels & 7: nonzero iff the source is misaligned */ |
yading@10 | 47 beq t0, $aligned |
yading@10 | 48 |
yading@10 | 49 .align 4 |
yading@10 | 50 $unaligned: /* misaligned src: copy 4 rows of 8 bytes per iteration */ |
yading@10 | 51 ldq_u t0, 0(a1) /* aligned quadword covering src bytes 0..7 of row 0 */ |
yading@10 | 52 ldq_u t1, 8(a1) /* next quadword, holds the spill-over bytes */ |
yading@10 | 53 addq a1, a2, a1 /* src += line_size */ |
yading@10 | 54 nop /* scheduling filler, presumably for pca56 issue slots */ |
yading@10 | 55 |
yading@10 | 56 ldq_u t2, 0(a1) /* row 1 */ |
yading@10 | 57 ldq_u t3, 8(a1) |
yading@10 | 58 addq a1, a2, a1 |
yading@10 | 59 nop |
yading@10 | 60 |
yading@10 | 61 ldq_u t4, 0(a1) /* row 2 */ |
yading@10 | 62 ldq_u t5, 8(a1) |
yading@10 | 63 addq a1, a2, a1 |
yading@10 | 64 nop |
yading@10 | 65 |
yading@10 | 66 ldq_u t6, 0(a1) /* row 3 */ |
yading@10 | 67 ldq_u t7, 8(a1) /* NOTE(review): the ext* below read a1 AFTER it was advanced, */ |
yading@10 | 68 extql t0, a1, t0 /* so this is only correct if line_size keeps a1's low 3 bits, */ |
yading@10 | 69 addq a1, a2, a1 /* i.e. line_size % 8 == 0 -- confirm against callers */ |
yading@10 | 70 |
yading@10 | 71 extqh t1, a1, t1 /* high part of row 0 from the second quadword */ |
yading@10 | 72 addq a0, a2, t8 /* t8 = dst row 1 */ |
yading@10 | 73 extql t2, a1, t2 /* (extql = low part shifted down by a1 & 7) */ |
yading@10 | 74 addq t8, a2, t9 /* t9 = dst row 2 */ |
yading@10 | 75 |
yading@10 | 76 extqh t3, a1, t3 |
yading@10 | 77 addq t9, a2, ta /* ta = dst row 3 */ |
yading@10 | 78 extql t4, a1, t4 |
yading@10 | 79 or t0, t1, t0 /* merge low|high into the full row-0 quadword */ |
yading@10 | 80 |
yading@10 | 81 extqh t5, a1, t5 |
yading@10 | 82 or t2, t3, t2 /* row 1 merged */ |
yading@10 | 83 extql t6, a1, t6 |
yading@10 | 84 or t4, t5, t4 /* row 2 merged */ |
yading@10 | 85 |
yading@10 | 86 extqh t7, a1, t7 |
yading@10 | 87 or t6, t7, t6 /* row 3 merged */ |
yading@10 | 88 stq t0, 0(a0) /* store the 4 rows; stq needs an 8-byte-aligned dst -- */ |
yading@10 | 89 stq t2, 0(t8) /* NOTE(review): assumes block is 8-aligned; confirm */ |
yading@10 | 90 |
yading@10 | 91 stq t4, 0(t9) |
yading@10 | 92 subq a3, 4, a3 /* h -= 4; NOTE(review): assumes h % 4 == 0, else a3 never hits 0 */ |
yading@10 | 93 stq t6, 0(ta) |
yading@10 | 94 addq ta, a2, a0 /* dst row 4 becomes next iteration's row 0 */ |
yading@10 | 95 |
yading@10 | 96 bne a3, $unaligned |
yading@10 | 97 ret |
yading@10 | 98 |
yading@10 | 99 .align 4 |
yading@10 | 100 $aligned: /* src 8-byte aligned: plain quadword loads, 4 rows per iteration */ |
yading@10 | 101 ldq t0, 0(a1) /* row 0 */ |
yading@10 | 102 addq a1, a2, a1 /* src += line_size */ |
yading@10 | 103 ldq t1, 0(a1) /* row 1 */ |
yading@10 | 104 addq a1, a2, a1 |
yading@10 | 105 |
yading@10 | 106 ldq t2, 0(a1) /* row 2 */ |
yading@10 | 107 addq a1, a2, a1 |
yading@10 | 108 ldq t3, 0(a1) /* row 3 */ |
yading@10 | 109 |
yading@10 | 110 addq a0, a2, t4 /* t4 = dst row 1 */ |
yading@10 | 111 addq a1, a2, a1 /* advance src past row 3 for the next iteration */ |
yading@10 | 112 addq t4, a2, t5 /* t5 = dst row 2 */ |
yading@10 | 113 subq a3, 4, a3 /* h -= 4 (same h % 4 == 0 assumption as above) */ |
yading@10 | 114 |
yading@10 | 115 stq t0, 0(a0) |
yading@10 | 116 addq t5, a2, t6 /* t6 = dst row 3 */ |
yading@10 | 117 stq t1, 0(t4) |
yading@10 | 118 addq t6, a2, a0 /* next iteration's dst row 0 */ |
yading@10 | 119 |
yading@10 | 120 stq t2, 0(t5) |
yading@10 | 121 stq t3, 0(t6) |
yading@10 | 122 |
yading@10 | 123 bne a3, $aligned |
yading@10 | 124 ret |
yading@10 | 125 .end put_pixels_axp_asm |