Actual source code: dgefa4.c
/*
   Inverts 4 by 4 matrix using Gaussian elimination with partial pivoting.

   Used by the sparse factorization routines in
   src/mat/impls/baij/seq

   This is a combination of the Linpack routines
   dgefa() and dgedi() specialized for a size of 4.
*/
#include <petscsys.h>
PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4(MatScalar *a, PetscReal shift, PetscBool allowzeropivot, PetscBool *zeropivotdetected)
{
  PetscInt   i__2, i__3, kp1, j, k, l, ll, i, ipvt[4], kb, k3;
  PetscInt   k4, j3;
  MatScalar *aa, *ax, *ay, work[16], stmp;
  MatReal    tmp, max;

  PetscFunctionBegin;
  if (zeropivotdetected) *zeropivotdetected = PETSC_FALSE;
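  /* scale the caller's shift by the magnitude of the diagonal so it acts as a relative
     perturbation; the 1.e-12 term keeps the scale nonzero even for an all-zero diagonal */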
  shift = .25 * shift * (1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));
  /* Parameter adjustment: offset the base pointer so the 1-based, column-major
     indexing a[i + 4*j] (i, j = 1..4) inherited from the Fortran Linpack code
     addresses entry (i-1, j-1) of the caller's 0-based array */
  a -= 5;
  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4 * k;
    k4  = k3 + k;
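    /* a[k4] indexes the diagonal entry a(k,k) */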
    /* find l = pivot index */
    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll = 1; ll < i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) {
        max = tmp;
        l   = ll + 1;
      }
    }
    l += k - 1;
    ipvt[k - 1] = l;
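    /* a zero pivot is either reported (allowzeropivot), raised as a hard error,
       or, when a shift was supplied, replaced by the scaled shift */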
    if (a[l + k3] == 0.0) {
      if (shift == 0.0) {
        if (allowzeropivot) {
          PetscCall(PetscInfo(NULL, "Zero pivot, row %" PetscInt_FMT "\n", k - 1));
          if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
        } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row %" PetscInt_FMT, k - 1);
      } else {
        /* SHIFT is applied to SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }
    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }
    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;
    /* row elimination with column indexing */
    ax = &a[k4 + 1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4 * j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1 + k + j3];
      for (ll = 0; ll < i__3; ll++) ay[ll] += stmp * ax[ll];
    }
  }
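  /* the last row is never interchanged, so ipvt[3] = 4; only the final pivot
     a(4,4), i.e. a[20] after the pointer offset, remains to be checked */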
  ipvt[3] = 4;
  if (a[20] == 0.0) {
    if (PetscLikely(allowzeropivot)) {
      PetscCall(PetscInfo(NULL, "Zero pivot, row 3\n"));
      if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
    } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row 3");
  }
  /* Now form the inverse */
  /* compute inverse(u) */
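  /* invert U in place, column by column: replace u(k,k) by its reciprocal, scale
     the entries above the diagonal, then fold column k into the trailing columns
     (cf. the Linpack dgedi update) */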
  for (k = 1; k <= 4; ++k) {
    k3    = 4 * k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3   = 4 * j;
      stmp = a[k + j3];

      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll = 0; ll < k; ll++) ay[ll] += stmp * ax[ll];
    }
  }
  /* form inverse(u)*inverse(l) */
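  /* sweep backward over the columns of L: save column k of L in work[] and zero it,
     accumulate inverse(U)*inverse(L), then undo the row interchange recorded in
     ipvt[k-1] by swapping the corresponding columns of the result */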
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4 * k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i - 1] = aa[i];

      aa[i] = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp = work[j - 1];
      ax   = &a[4 * j + 1];
      ay   = &a[k3 + 1];
      ay[0] += stmp * ax[0];
      ay[1] += stmp * ax[1];
      ay[2] += stmp * ax[2];
      ay[3] += stmp * ax[3];
    }
    l = ipvt[k - 1];
    if (l != k) {
      ax = &a[k3 + 1];
      ay = &a[4 * l + 1];

      stmp  = ax[0];
      ax[0] = ay[0];
      ay[0] = stmp;

      stmp  = ax[1];
      ax[1] = ay[1];
      ay[1] = stmp;

      stmp  = ax[2];
      ax[2] = ay[2];
      ay[2] = stmp;

      stmp  = ax[3];
      ax[3] = ay[3];
      ay[3] = stmp;
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
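/*
   A minimal usage sketch (hypothetical driver, not part of this file), assuming a
   PETSc build where MatScalar is double and no pivot shifting is wanted:

     MatScalar A[16] = {4, 0, 0, 0, 0, 3, 0, 0, 0, 0, 2, 0, 0, 0, 0, 1};
     PetscBool zeropivot;

     PetscCall(PetscKernel_A_gets_inverse_A_4(A, 0.0, PETSC_TRUE, &zeropivot));
     // A now holds its inverse in place (here: diag(1/4, 1/3, 1/2, 1))
*/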
#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE
PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
     Order Number: 245043-001
     March 1999
     https://www.intel.com/content/www/us/en/homepage.html

     Inverse of a 4x4 matrix via Cramer's Rule:
     bool Invert4x4(SMLXMatrix &);
  */
  PetscFunctionBegin;
  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)
  /* ----------------------------------------------- */
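  /* load the 16 floats and rearrange them with Intel's 4x4 transpose idiom:
     after these shuffles XMM3..XMM6 hold a transposed, 2x2-blocked layout of
     the input that the cofactor computation below expects */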
  SSE_LOADL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_4, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_8, XMM5)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_12, XMM5)

  SSE_COPY_PS(XMM3, XMM0)
  SSE_SHUFFLE(XMM3, XMM5, 0x88)

  SSE_SHUFFLE(XMM5, XMM0, 0xDD)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_2, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_6, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_10, XMM6)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_14, XMM6)

  SSE_COPY_PS(XMM4, XMM0)
  SSE_SHUFFLE(XMM4, XMM6, 0x88)

  SSE_SHUFFLE(XMM6, XMM0, 0xDD)
  /* ----------------------------------------------- */
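  /* the next several blocks accumulate the 16 cofactors of the matrix four at a
     time; partially finished adjugate rows are parked in the matrix storage
     itself (FLOAT_0, FLOAT_4, FLOAT_12) and reloaded when registers run out */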
  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM5)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM1, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_4, XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)

  SSE_SHUFFLE(XMM0, XMM0, 0xB1)

  SSE_COPY_PS(XMM2, XMM6)
  SSE_MULT_PS(XMM2, XMM0)
  SSE_ADD_PS(XMM2, XMM1)

  SSE_COPY_PS(XMM7, XMM3)
  SSE_MULT_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM0)
  SSE_SUB_PS(XMM2, XMM1)

  SSE_MULT_PS(XMM0, XMM3)
  SSE_SUB_PS(XMM0, XMM7)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7, XMM5)
  SSE_SHUFFLE(XMM7, XMM5, 0x4E)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_COPY_PS(XMM0, XMM4)
  SSE_MULT_PS(XMM0, XMM7)
  SSE_ADD_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM4)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM0, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_0, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM5)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM0, XMM6)
  SSE_MULT_PS(XMM0, XMM1)
  SSE_ADD_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_SUB_PS_M(XMM2, SSE_ARG_1, FLOAT_12)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM6)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_SUB_PS(XMM2, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM6)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_4, XMM0)
  SSE_SUB_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM5)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_ADD_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_ADD_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM5)
  SSE_SUB_PS(XMM2, XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4, XMM3)

  SSE_SHUFFLE(XMM4, XMM4, 0xB1)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM4)
  SSE_ADD_PS(XMM1, XMM7)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_12, XMM7)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_MULT_PS(XMM6, XMM4)
  SSE_SUB_PS(XMM1, XMM6)

  SSE_MULT_PS(XMM5, XMM4)
  SSE_ADD_PS(XMM5, XMM7)

  /* ----------------------------------------------- */
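  /* determinant: dot product of the first transposed row (XMM3) with its cofactor
     row (reloaded into XMM0 from FLOAT_0); the shuffle/add pair is a horizontal
     reduction of the four products into the low lane of XMM6 */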
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_MULT_PS(XMM3, XMM0)

  SSE_COPY_PS(XMM4, XMM3)
  SSE_SHUFFLE(XMM4, XMM3, 0x4E)
  SSE_ADD_PS(XMM4, XMM3)

  SSE_COPY_PS(XMM6, XMM4)
  SSE_SHUFFLE(XMM6, XMM4, 0xB1)
  SSE_ADD_SS(XMM6, XMM4)
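  /* 1/det: rcpss yields only ~12 correct bits, so one Newton-Raphson step,
     x <- x*(2 - det*x), refines the reciprocal to nearly full single precision */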
  SSE_COPY_PS(XMM3, XMM6)
  SSE_RECIP_SS(XMM3, XMM6)
  SSE_COPY_SS(XMM4, XMM3)
  SSE_ADD_SS(XMM4, XMM3)
  SSE_MULT_SS(XMM3, XMM3)
  SSE_MULT_SS(XMM6, XMM3)
  SSE_SUB_SS(XMM4, XMM6)

  SSE_SHUFFLE(XMM4, XMM4, 0x00)
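  /* broadcast 1/det to all four lanes and scale each cofactor row by it,
     writing the inverse back over the input matrix */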
  SSE_MULT_PS(XMM0, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_2, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_4, XMM1)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_6, XMM1)

  SSE_MULT_PS(XMM2, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_8, XMM2)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_10, XMM2)

  SSE_MULT_PS(XMM4, XMM5)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_12, XMM4)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_14, XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;
  PetscFunctionReturn(PETSC_SUCCESS);
}

#endif