LORENE
connection_fspher.C
/*
 *  Methods of class Connection_fspher.
 *
 *  (see file connection.h for documentation)
 *
 */

/*
 *   Copyright (c) 2003-2004 Eric Gourgoulhon & Jerome Novak
 *
 *   This file is part of LORENE.
 *
 *   LORENE is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License version 2
 *   as published by the Free Software Foundation.
 *
 *   LORENE is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with LORENE; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

char connection_fspher_C[] = "$Header: /cvsroot/Lorene/C++/Source/Connection/connection_fspher.C,v 1.24 2014/10/13 08:52:50 j_novak Exp $" ;

/*
 * $Id: connection_fspher.C,v 1.24 2014/10/13 08:52:50 j_novak Exp $
 * $Log: connection_fspher.C,v $
 * Revision 1.24  2014/10/13 08:52:50  j_novak
 * Lorene classes and functions now belong to the namespace Lorene.
 *
 * Revision 1.23  2014/10/06 15:13:04  j_novak
 * Modified #include directives to use c++ syntax.
 *
 * Revision 1.22  2005/05/25 16:11:03  j_novak
 * Better handling of the case with no compactified domain.
 *
 * Revision 1.21  2004/01/29 15:21:21  e_gourgoulhon
 * Method p_divergence: changed treatment of dzpuis.
 * Methods p_derive_cov and p_divergence: add warning if all the input component
 * do not have the same dzpuis.
 *
 * Revision 1.20  2004/01/28 13:25:40  j_novak
 * The ced_mult_r arguments have been suppressed from the Scalar::*dsd* methods.
 * In the div/mult _r_dzpuis, there is no more default value.
 *
 * Revision 1.19  2004/01/27 15:10:02  j_novak
 * New methods Scalar::div_r_dzpuis(int) and Scalar_mult_r_dzpuis(int)
 * which replace div_r_inc*. Tried to clean the dzpuis handling.
 * WARNING: no testing at this point!!
 *
 * Revision 1.18  2004/01/23 07:57:06  e_gourgoulhon
 * Slight change in some comment.
 *
 * Revision 1.17  2004/01/22 16:14:22  e_gourgoulhon
 * Method p_derive_cov: reorganization of the dzpuis treatment.
 * Added the case of input dzpuis = 2.
 *
 * Revision 1.16  2004/01/04 21:00:50  e_gourgoulhon
 * Better handling of tensor symmetries in methods p_derive_cov() and
 * p_divergence() (thanks to the new class Tensor_sym).
 *
 * Revision 1.15  2004/01/01 11:24:04  e_gourgoulhon
 * Full reorganization of method p_derive_cov: the main loop is now
 * on the indices of the *output* tensor (to take into account
 * symmetries in the input and output tensors).
 *
 * Revision 1.14  2003/12/27 14:59:52  e_gourgoulhon
 * -- Method derive_cov() suppressed.
 * -- Change of the position of the derivation index from the first one
 *    to the last one in methods p_derive_cov() and p_divergence().
 *
 * Revision 1.13  2003/11/03 13:37:58  j_novak
 * Still dzpuis...
 *
 * Revision 1.12  2003/11/03 11:14:18  j_novak
 * Treatment of the case dzpuis = 4.
 *
 * Revision 1.11  2003/11/03 10:58:30  j_novak
 * Treatment of the general case for divergence.
 *
 * Revision 1.10  2003/10/22 13:08:03  j_novak
 * Better handling of dzpuis flags
 *
 * Revision 1.9  2003/10/16 15:26:48  e_gourgoulhon
 * Name of method Scalar::div_r_ced() changed to Scalar::div_r_inc2().
 *
 * Revision 1.8  2003/10/16 14:21:36  j_novak
 * The calculation of the divergence of a Tensor is now possible.
 *
 * Revision 1.7  2003/10/15 10:46:18  e_gourgoulhon
 * Introduced call to the new method Scalar::div_tant to perform
 * division by tan(theta) in derive_cov.
 *
 * Revision 1.6  2003/10/11 16:45:43  e_gourgoulhon
 * Suppressed the call to Itbl::set_etat_qcq() after
 * the construction of the Itbl's.
 *
 * Revision 1.5  2003/10/11 14:39:50  e_gourgoulhon
 * Suppressed declaration of unusued arguments in some methods.
 *
 * Revision 1.4  2003/10/06 13:58:47  j_novak
 * The memory management has been improved.
 * Implementation of the covariant derivative with respect to the exact Tensor
 * type.
 *
 * Revision 1.3  2003/10/05 21:09:23  e_gourgoulhon
 * Method derive_cov: multiplication by r^2 in the CED.
 *
 * Revision 1.2  2003/10/01 21:49:45  e_gourgoulhon
 * First version of derive_cov --- not tested yet.
 *
 * Revision 1.1  2003/10/01 15:42:49  e_gourgoulhon
 * still ongoing...
 *
 *
 *
 * $Header: /cvsroot/Lorene/C++/Source/Connection/connection_fspher.C,v 1.24 2014/10/13 08:52:50 j_novak Exp $
 *
 */

// C++ headers
#include "headcpp.h"

// C headers
#include <cstdlib>

// Lorene headers
#include "connection.h"

            //------------------------------//
            //         Constructors         //
            //------------------------------//

// Constructor from a spherical flat-metric-orthonormal basis

namespace Lorene {
Connection_fspher::Connection_fspher(const Map& mpi, const Base_vect_spher& bi)
    : Connection_flat(mpi, bi) {

}

// Copy constructor
Connection_fspher::Connection_fspher(const Connection_fspher& ci)
    : Connection_flat(ci) {

}


            //----------------------------//
            //         Destructor         //
            //----------------------------//

Connection_fspher::~Connection_fspher(){

}


            //--------------------------------//
            //     Mutators / assignment      //
            //--------------------------------//


void Connection_fspher::operator=(const Connection_fspher& ) {

    cout << "Connection_fspher::operator= : not implemented yet !" << endl ;
    abort() ;

}


            //-----------------------------//
            //    Computational methods    //
            //-----------------------------//

// Covariant derivative, returning a pointer.
//-------------------------------------------

Tensor* Connection_fspher::p_derive_cov(const Tensor& uu) const {

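    // The result has one more index than uu : the derivation (covariant)
    // index, appended in the last position. Each component of the result
    // is the corresponding partial derivative of uu (d/dr, 1/r d/dtheta
    // or 1/(r sin(theta)) d/dphi, according to the derivation index),
    // corrected by connection-coefficient terms, one sum per index of uu.
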
    // Notations: suffix 0 in name <=> input tensor
    //            suffix 1 in name <=> output tensor

    int valence0 = uu.get_valence() ;
    int valence1 = valence0 + 1 ;
    int valence1m1 = valence1 - 1 ;  // same as valence0, but introduced for
                                     // the sake of clarity
    int ncomp0 = uu.get_n_comp() ;

    // Protections
    // -----------
    if (valence0 >= 1) {
        assert(uu.get_triad() == triad) ;
    }

    // Creation of the result (pointer)
    // --------------------------------
    Tensor* resu ;

    // If uu is a Scalar, the result is a vector
    if (valence0 == 0)
        resu = new Vector(*mp, COV, triad) ;
    else {

        // Type of indices of the result :
        Itbl tipe(valence1) ;
        const Itbl& tipeuu = uu.get_index_type() ;
        for (int id = 0; id<valence0; id++) {
            tipe.set(id) = tipeuu(id) ;     // First indices = same as uu
        }
        tipe.set(valence1m1) = COV ;        // last index is the derivation index

        // if uu is a Tensor_sym, the result is also a Tensor_sym:
        const Tensor* puu = &uu ;
        const Tensor_sym* puus = dynamic_cast<const Tensor_sym*>(puu) ;
        if ( puus != 0x0 ) {    // the input tensor is symmetric
            resu = new Tensor_sym(*mp, valence1, tipe, *triad,
                                  puus->sym_index1(), puus->sym_index2()) ;
        }
        else {
            resu = new Tensor(*mp, valence1, tipe, *triad) ;    // no symmetry
        }

    }
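
    // Note : since the derivation index is appended in the last position,
    // any symmetry among the indices of uu is inherited as is by resu.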

    int ncomp1 = resu->get_n_comp() ;

    Itbl ind1(valence1) ;   // working Itbl to store the indices of resu
    Itbl ind0(valence0) ;   // working Itbl to store the indices of uu
    Itbl ind(valence0) ;    // working Itbl to store the indices of uu

    Scalar tmp(*mp) ;       // working scalar

    // Determination of the dzpuis parameter of the result --> dz_resu
    // ---------------------------------------------------
    int dz_in = 0 ;
    for (int ic=0; ic<ncomp0; ic++) {
        int dzp = uu(uu.indices(ic)).get_dzpuis() ;
        assert(dzp >= 0) ;
        if (dzp > dz_in) dz_in = dzp ;
    }

#ifndef NDEBUG
    // Check : do all components have the same dzpuis ?
    for (int ic=0; ic<ncomp0; ic++) {
        if ( !(uu(uu.indices(ic)).check_dzpuis(dz_in)) ) {
            cout << "######## WARNING #######\n" ;
            cout << " Connection_fspher::p_derive_cov : the tensor components \n"
                 << " do not have all the same dzpuis ! : \n"
                 << " ic, dzpuis(ic), dz_in : " << ic << " "
                 << uu(uu.indices(ic)).get_dzpuis() << " " << dz_in << endl ;
        }
    }
#endif

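    // dzpuis of the result : the covariant derivative brings in one extra
    // power of 1/r, so dz_resu is set to dz_in + 1 (with the standard
    // value 2 for an input having dzpuis = 0). If the outermost domain is
    // not compactified, the dzpuis mechanism is irrelevant and dz_resu is
    // set to 0.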
    int dz_resu = (dz_in == 0) ? 2 : dz_in + 1 ;
    int nzm1 = mp->get_mg()->get_nzone() - 1 ;
    if (mp->get_mg()->get_type_r(nzm1) != UNSURR) dz_resu = 0 ;

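    // For reference, the nonzero connection coefficients Gamma^i_{jk} of the
    // flat metric in the orthonormal spherical basis (k = derivation index;
    // they all vanish for k = r) :
    //   Gamma^theta_{r theta} = Gamma^phi_{r phi} = 1/r
    //   Gamma^r_{theta theta} = Gamma^r_{phi phi} = -1/r
    //   Gamma^phi_{theta phi} = 1/(r tan(theta))
    //   Gamma^theta_{phi phi} = -1/(r tan(theta))
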
    // Loop on all the components of the output tensor
    // -----------------------------------------------
    for (int ic=0; ic<ncomp1; ic++) {

        // indices corresponding to the component no. ic in the output tensor
        ind1 = resu->indices(ic) ;

        // Component no. ic:
        Scalar& cresu = resu->set(ind1) ;

        // Indices of the input tensor
        for (int id = 0; id < valence0; id++) {
            ind0.set(id) = ind1(id) ;
        }

        // Value of last index (derivation index)
        int k = ind1(valence1m1) ;

        switch (k) {

            case 1 : {  // Derivation index = r
                        //---------------------

                cresu = (uu(ind0)).dsdr() ;     // d/dr

                // all the connection coefficients Gamma^i_{jk} are zero for k=1
                break ;
            }

            case 2 : {  // Derivation index = theta
                        //-------------------------

                cresu = (uu(ind0)).srdsdt() ;   // 1/r d/dtheta

                // Loop on all the indices of uu
                for (int id=0; id<valence0; id++) {

                    switch ( ind0(id) ) {

                        case 1 : {  // Gamma^r_{l theta} V^l
                                    // or -Gamma^l_{r theta} V_l
                            ind = ind0 ;
                            ind.set(id) = 2 ;   // l = theta

                            // Division by r :
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            cresu -= tmp ;
                            break ;
                        }

                        case 2 : {  // Gamma^theta_{l theta} V^l
                                    // or -Gamma^l_{theta theta} V_l
                            ind = ind0 ;
                            ind.set(id) = 1 ;   // l = r
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            cresu += tmp ;
                            break ;
                        }

                        case 3 : {  // Gamma^phi_{l theta} V^l
                                    // or -Gamma^l_{phi theta} V_l
                            break ;
                        }

                        default : {
                            cerr << "Connection_fspher::p_derive_cov : index problem ! "
                                 << endl ;
                            abort() ;
                        }
                    }

                }
                break ;
            }


            case 3 : {  // Derivation index = phi
                        //-----------------------

                cresu = (uu(ind0)).srstdsdp() ;     // 1/(r sin(theta)) d/dphi

                // Loop on all the indices of uu
                for (int id=0; id<valence0; id++) {

                    switch ( ind0(id) ) {

                        case 1 : {  // Gamma^r_{l phi} V^l
                                    // or -Gamma^l_{r phi} V_l
                            ind = ind0 ;
                            ind.set(id) = 3 ;   // l = phi
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            cresu -= tmp ;
                            break ;
                        }

                        case 2 : {  // Gamma^theta_{l phi} V^l
                                    // or -Gamma^l_{theta phi} V_l
                            ind = ind0 ;
                            ind.set(id) = 3 ;   // l = phi
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            tmp.div_tant() ;    // division by tan(theta)

                            cresu -= tmp ;
                            break ;
                        }

                        case 3 : {  // Gamma^phi_{l phi} V^l
                                    // or -Gamma^l_{phi phi} V_l

                            ind = ind0 ;
                            ind.set(id) = 1 ;   // l = r
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            cresu += tmp ;

                            ind.set(id) = 2 ;   // l = theta
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            tmp.div_tant() ;    // division by tan(theta)

                            cresu += tmp ;
                            break ;
                        }

                        default : {
                            cerr << "Connection_fspher::p_derive_cov : index problem ! "
                                 << endl ;
                            abort() ;
                        }
                    }

                }

                break ;
            }

            default : {
                cerr << "Connection_fspher::p_derive_cov : index problem ! \n" ;
                abort() ;
            }

        }   // End of switch on the derivation index


    }   // End of loop on all the components of the output tensor

    // All done !
    // ----------
    return resu ;

}



// Divergence, returning a pointer.
//---------------------------------

Tensor* Connection_fspher::p_divergence(const Tensor& uu) const {

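    // The divergence is the covariant derivative contracted over its last
    // two indices: (div T)^{i1...i_{n-1}} = nabla_k T^{i1...i_{n-1} k},
    // where the contraction involves the last (contravariant) index of uu ;
    // the result thus has one index fewer than uu.
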
    // Notations: suffix 0 in name <=> input tensor
    //            suffix 1 in name <=> output tensor

    int valence0 = uu.get_valence() ;
    int valence1 = valence0 - 1 ;
    int valence0m1 = valence0 - 1 ;  // same as valence1 but introduced for
                                     // the sake of clarity
    int ncomp0 = uu.get_n_comp() ;

    // Protections
    // -----------
    assert (valence0 >= 1) ;
    assert (uu.get_triad() == triad) ;

    // Last index must be contravariant:
    assert (uu.get_index_type(valence0-1) == CON) ;


    // Creation of the pointer on the result tensor
    // --------------------------------------------
    Tensor* resu ;

    if (valence0 == 1)  // if uu is a Vector, the result is a Scalar
        resu = new Scalar(*mp) ;
    else {

        // Type of indices of the result :
        Itbl tipe(valence1) ;
        const Itbl& tipeuu = uu.get_index_type() ;
        for (int id = 0; id<valence1; id++) {
            tipe.set(id) = tipeuu(id) ;  // type of remaining indices =
        }                                // same as uu indices

        if (valence0 == 2) {  // if uu is a rank 2 tensor, the result is a Vector
            resu = new Vector(*mp, tipe(0), *triad) ;
        }
        else {
            // if uu is a Tensor_sym, the result might also be a Tensor_sym:
            const Tensor* puu = &uu ;
            const Tensor_sym* puus = dynamic_cast<const Tensor_sym*>(puu) ;
            if ( puus != 0x0 ) {    // the input tensor is symmetric

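                // The contraction performed below is on the last index of uu,
                // so the symmetry of uu survives in the result only if it
                // does not involve that last index: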
                if (puus->sym_index2() != valence0 - 1) {

                    // the symmetry is preserved:

                    if (valence1 == 2) {
                        resu = new Sym_tensor(*mp, tipe, *triad) ;
                    }
                    else {
                        resu = new Tensor_sym(*mp, valence1, tipe, *triad,
                                              puus->sym_index1(), puus->sym_index2()) ;
                    }
                }
                else {  // the symmetry is lost:

                    resu = new Tensor(*mp, valence1, tipe, *triad) ;
                }
            }
            else {  // no symmetry in the input tensor:
                resu = new Tensor(*mp, valence1, tipe, *triad) ;
            }
        }

    }

    int ncomp1 = resu->get_n_comp() ;

    Itbl ind0(valence0) ;   // working Itbl to store the indices of uu
    Itbl ind1(valence1) ;   // working Itbl to store the indices of resu
    Itbl ind(valence0) ;    // working Itbl to store the indices of uu

    Scalar tmp1(*mp) ;      // working scalar
    Scalar tmp2(*mp) ;      // working scalar

    // Determination of the dzpuis parameter of the result --> dz_resu
    // ---------------------------------------------------
    int dz_in = 0 ;
    for (int ic=0; ic<ncomp0; ic++) {
        int dzp = uu(uu.indices(ic)).get_dzpuis() ;
        assert(dzp >= 0) ;
        if (dzp > dz_in) dz_in = dzp ;
    }

#ifndef NDEBUG
    // Check : do all components have the same dzpuis ?
    for (int ic=0; ic<ncomp0; ic++) {
        if ( !(uu(uu.indices(ic)).check_dzpuis(dz_in)) ) {
            cout << "######## WARNING #######\n" ;
            cout << " Connection_fspher::p_divergence : the tensor components \n"
                 << " do not have all the same dzpuis ! : \n"
                 << " ic, dzpuis(ic), dz_in : " << ic << " "
                 << uu(uu.indices(ic)).get_dzpuis() << " " << dz_in << endl ;
        }
    }
#endif

    int dz_resu = (dz_in == 0) ? 2 : dz_in + 1 ;

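    // For reference, for a vector V the divergence in the orthonormal
    // spherical basis reads
    //   div V = dV^r/dr + 2 V^r/r + 1/r dV^theta/dtheta
    //           + V^theta/(r tan(theta)) + 1/(r sin(theta)) dV^phi/dphi ;
    // the same contraction is applied below to the last index of uu,
    // whatever its valence.
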
    // Loop on all the components of the output tensor
    for (int ic=0; ic<ncomp1; ic++) {

        ind1 = resu->indices(ic) ;
        Scalar& cresu = resu->set(ind1) ;

        // Derivation index = r
        // --------------------
        int k = 1 ;

        // indices (ind1,k) in the input tensor
        for (int id = 0; id < valence1; id++) {
            ind0.set(id) = ind1(id) ;
        }
        ind0.set(valence0m1) = k ;

        cresu = uu(ind0).dsdr() ;   // dT^{l r}/dr

        // Derivation index = theta
        // ------------------------
        k = 2 ;

        // indices (ind1,k) in the input tensor
        for (int id = 0; id < valence1; id++) {
            ind0.set(id) = ind1(id) ;
        }
        ind0.set(valence0m1) = k ;

        tmp1 = uu(ind0).dsdt() ;    // dT^{l theta}/dtheta

        ind = ind0 ;
        ind.set(valence0m1) = 1 ;
        tmp1 += uu(ind) ;   // Gamma^theta_{r theta} T^{l r} (div_r is done later)


        // Loop on all the (valence0-1) first indices of uu
        for (int id=0; id<valence0m1; id++) {

            switch ( ind0(id) ) {
                case 1 : {  // Gamma^r_{l theta} V^l
                            // or -Gamma^l_{r theta} V_l
                    ind = ind0 ;
                    ind.set(id) = 2 ;   // l = theta
                    tmp1 -= uu(ind) ;
                    break ;
                }

                case 2 : {  // Gamma^theta_{l theta} V^l
                            // or -Gamma^l_{theta theta} V_l
                    ind = ind0 ;
                    ind.set(id) = 1 ;   // l = r
                    tmp1 += uu(ind) ;
                    break ;
                }

                case 3 : {  // Gamma^phi_{l theta} V^l
                            // or -Gamma^l_{phi theta} V_l
                    break ;
                }

                default : {
                    cout << "Connection_fspher::p_divergence : index problem ! "
                         << endl ;
                    abort() ;
                }
            }

        }

        // Derivation index = phi
        // ----------------------
        k = 3 ;

        // indices (ind1,k) in the input tensor
        for (int id = 0; id < valence1; id++) {
            ind0.set(id) = ind1(id) ;
        }
        ind0.set(valence0m1) = k ;

        tmp1 += uu(ind0).stdsdp() ;     // 1/sin(theta) dT^{l phi}/dphi

        ind = ind0 ;
        ind.set(valence0m1) = 1 ;
        tmp1 += uu(ind) ;   // Gamma^phi_{r phi} T^{l r} (div_r is done later)
        ind.set(valence0m1) = 2 ;
        tmp2 = uu(ind) ;    // Gamma^phi_{theta phi} T^{l theta} (div_r is done later)

        // Loop on all the (valence0-1) first indices of uu
        for (int id=0; id<valence0-1; id++) {

            switch ( ind0(id) ) {
                case 1 : {  // Gamma^r_{l phi} V^l
                            // or -Gamma^l_{r phi} V_l
                    ind = ind0 ;
                    ind.set(id) = 3 ;   // l = phi
                    tmp1 -= uu(ind) ;
                    break ;
                }

                case 2 : {  // Gamma^theta_{l phi} V^l
                            // or -Gamma^l_{theta phi} V_l
                    ind = ind0 ;
                    ind.set(id) = 3 ;   // l = phi
                    tmp2 -= uu(ind) ;
                    break ;
                }

                case 3 : {  // Gamma^phi_{l phi} V^l
                            // or -Gamma^l_{phi phi} V_l
                    ind = ind0 ;

                    ind.set(id) = 1 ;   // l = r
                    tmp1 += uu(ind) ;

                    ind.set(id) = 2 ;   // l = theta
                    tmp2 += uu(ind) ;
                    break ;
                }

                default : {
                    cout << "Connection_fspher::p_divergence : index problem ! "
                         << endl ;
                    abort() ;
                }
            }
        }

        // There remains a division by tan(theta) and r:
        //----------------------------------------------
        tmp2.div_tant() ;
        tmp1 += tmp2 ;
        tmp1.div_r_dzpuis(dz_resu) ;

        cresu += tmp1 ;     // added to the d/dr term already stored in cresu

    }

    // All done !
    // ----------
    return resu ;

}




}   // end of namespace Lorene