Restructured the interdependencies within the BMWlibs (maybe other commits will follow)

Bastian M. Wojek
2011-03-20 18:03:49 +00:00
parent dc86404f88
commit aaa0638729
51 changed files with 60 additions and 3223 deletions


@@ -0,0 +1,74 @@
## Process this file with automake to create Makefile.in
h_sources = \
../include/TBofZCalc.h \
../include/TBulkTriVortexFieldCalc.h \
../include/TLondon1D.h \
../include/TPofBCalc.h \
../include/TPofTCalc.h \
../include/TSkewedGss.h \
../include/TVortex.h
h_linkdef = \
../include/TLondon1DLinkDef.h \
../include/TVortexLinkDef.h \
../include/TSkewedGssLinkDef.h
dict_h_sources = \
TLondon1DDict.h \
TSkewedGssDict.h \
TVortexDict.h
cpp_sources = \
TBulkTriVortexFieldCalc.cpp \
TBofZCalc.cpp \
TLondon1D.cpp \
TPofBCalc.cpp \
TPofTCalc.cpp \
TSkewedGss.cpp \
TVortex.cpp
dict_cpp_sources = \
TLondon1DDict.cpp \
TSkewedGssDict.cpp \
TVortexDict.cpp
include_HEADERS = $(h_sources)
noinst_HEADERS = $(h_linkdef) $(dict_h_sources)
INCLUDES = -I$(top_srcdir)/src/include -I../include $(BMWTOOLS_CFLAGS) $(LEM_CFLAGS) $(PMUSR_CFLAGS) $(FFTW3_CFLAGS) $(ROOT_CFLAGS)
AM_CXXFLAGS = $(LOCAL_LIB_CXXFLAGS)
BUILT_SOURCES = $(dict_cpp_sources) $(dict_h_sources)
AM_LDFLAGS = $(LOCAL_LIB_LDFLAGS) -L@ROOTLIBDIR@
CLEANFILES = *Dict.cpp *Dict.h *~ ../include/*~ core
%Dict.cpp %Dict.h: ../include/%.h ../include/%LinkDef.h
@ROOTCINT@ -v -f $*Dict.cpp -c -p $(INCLUDES) $^
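## Illustration of the dictionary rule above: for TLondon1D the pattern expands roughly to
##   TLondon1DDict.cpp TLondon1DDict.h: ../include/TLondon1D.h ../include/TLondon1DLinkDef.h
##           @ROOTCINT@ -v -f TLondon1DDict.cpp -c -p $(INCLUDES) ../include/TLondon1D.h ../include/TLondon1DLinkDef.h
## i.e. rootcint generates the ROOT dictionary sources which are then compiled into libFitPofB.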
lib_LTLIBRARIES = libFitPofB.la
libFitPofB_la_SOURCES = $(h_sources) $(cpp_sources) $(dict_h_sources) $(dict_cpp_sources)
libFitPofB_la_LIBADD = $(BMWTOOLS_LIBS) $(LEM_LIBS) $(PMUSR_LIBS) $(FFTW3_LIBS) $(ROOT_LIBS)
libFitPofB_la_LDFLAGS = -version-info $(PLUGIN_LIBRARY_VERSION) -release $(PLUGIN_RELEASE) $(AM_LDFLAGS)
## For the moment do not build pkgconfig files for musrfit plug-ins...
## pkgconfigdir = $(libdir)/pkgconfig
## pkgconfig_DATA = PTFitPofB.pc
## However, create some symbolic links to the shared library
## in order to unify the function call on different operating systems
if IS_DARWIN
install-exec-hook:
$(LN_S) $(libdir)/libFitPofB.dylib $(libdir)/libFitPofB.so
uninstall-hook:
rm -f $(libdir)/libFitPofB.so
endif
if IS_CYGWIN
install-exec-hook:
$(LN_S) $(bindir)/cygFitPofB-$(PLUGIN_MAJOR_VERSION)-$(PLUGIN_MINOR_VERSION)-$(PLUGIN_MAJOR_VERSION).dll $(libdir)/libFitPofB.so
uninstall-hook:
rm -f $(libdir)/libFitPofB.so
endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,754 @@
/***************************************************************************
TPofBCalc.cpp
Author: Bastian M. Wojek
e-mail: bastian.wojek@psi.ch
2008/09/04
***************************************************************************/
/***************************************************************************
* Copyright (C) 2009 by Bastian M. Wojek *
* *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "TPofTCalc.h"
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <cassert>
#ifdef HAVE_GOMP
#include <omp.h>
#endif
/* USED FOR DEBUGGING-----------------------------------
#include <cstdio>
#include <ctime>
/-------------------------------------------------------*/
TPofBCalc::TPofBCalc(const vector<double> &para) : fBmin(0.0), fBmax(0.0), fDT(para[0]), fDB(para[1]), fPBExists(false) {
fPBSize = static_cast<int>(1.0/(gBar*fDT*fDB));
if (fPBSize % 2) {
fPBSize += 1;
} else {
fPBSize += 2;
}
fB = new double[fPBSize];
fPB = new double[fPBSize];
int i;
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i = 0; i < static_cast<int>(fPBSize); ++i) {
fB[i] = static_cast<double>(i)*fDB;
fPB[i] = 0.0;
}
}
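// Note on the grid above: the number of field bins, fPBSize ~ 1/(gBar*fDT*fDB) rounded up to
// an even number, is chosen to match the FFT length fNFFT used later in TPofTCalc (which is
// computed from dt and dB in the same way), so that the field grid B[i] = i*dB transforms to
// a time grid with spacing close to the requested dt
// (gBar presumably being the reduced muon gyromagnetic ratio gamma_mu/2pi in MHz/G).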
// Do not actually calculate P(B) but take it from a B and a P(B) vector of the same size
TPofBCalc::TPofBCalc(const vector<double>& b, const vector<double>& pb, double dt) {
assert(b.size() == pb.size() && b.size() >= 2);
fPBSize = pb.size();
fB = new double[fPBSize];
fPB = new double[fPBSize];
int i;
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i = 0; i < static_cast<int>(fPBSize); ++i) {
fB[i] = b[i];
fPB[i] = pb[i];
}
vector<double>::const_iterator iter, iterB;
iterB = b.begin();
for(iter = pb.begin(); iter != pb.end(); ++iter){
if(*iter != 0.0) {
fBmin = *iterB;
// cout << fBmin << endl;
break;
}
++iterB;
}
for( ; iter != pb.end(); ++iter){ // compare against pb's end iterator (iter runs over pb, not b)
if(*iter == 0.0) {
fBmax = *(iterB-1);
// cout << fBmax << endl;
break;
}
++iterB;
}
fDT = dt; // needed if a convolution should be done
fDB = b[1]-b[0];
// cout << fDB << endl;
fPBExists = true;
}
void TPofBCalc::UnsetPBExists() {
int i;
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i = 0; i < static_cast<int>(fPBSize); ++i) {
fPB[i] = 0.0;
}
fPBExists = false;
}
void TPofBCalc::Normalize(unsigned int minFilledIndex = 0, unsigned int maxFilledIndex = 0) const {
if (!maxFilledIndex)
maxFilledIndex = fPBSize - 1;
int i;
double pBsum(0.0);
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic) reduction(+:pBsum)
#endif
for (i = minFilledIndex; i <= static_cast<int>(maxFilledIndex); ++i)
pBsum += fPB[i];
pBsum *= fDB;
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i = minFilledIndex; i <= static_cast<int>(maxFilledIndex); ++i)
fPB[i] /= pBsum;
}
void TPofBCalc::Calculate(const string &type, const vector<double> &para) {
if (type == "skg"){ // skewed Gaussian
fBmin = 0.0;
fBmax = para[2]/gBar+10.0*fabs(para[4])/(2.0*pi*gBar);
int a3(static_cast<int>(floor(fBmax/fDB)));
int a4(static_cast<int>(ceil(fBmax/fDB)));
int BmaxIndex((a3 < a4) ? a4 : (a4 + 1));
int B0Index(static_cast<int>(ceil(para[2]/(gBar*fDB))));
double expominus(para[3]*para[3]/(2.0*pi*pi*gBar*gBar));
double expoplus(para[4]*para[4]/(2.0*pi*pi*gBar*gBar));
double B0(para[2]/(gBar));
int i;
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i = 0; i < B0Index; ++i) {
fPB[i] = exp(-(fB[i]-B0)*(fB[i]-B0)/expominus);
}
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i = B0Index; i <= BmaxIndex; ++i) {
fPB[i] = exp(-(fB[i]-B0)*(fB[i]-B0)/expoplus);
}
Normalize(0, BmaxIndex);
}
fPBExists = true;
return;
}
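// The "skg" branch above fills a skewed Gaussian field distribution,
//   p(B) ~ exp(-(2*pi*gBar)^2*(B-B0)^2/(2*sigma-^2))   for B <  B0 ,
//   p(B) ~ exp(-(2*pi*gBar)^2*(B-B0)^2/(2*sigma+^2))   for B >= B0 ,
// with B0 = para[2]/gBar (para[2] = nu0), sigma- = para[3] and sigma+ = para[4],
// and then normalizes p(B) up to BmaxIndex.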
//-----------
// Calculate method that computes P(B) from a given analytical inverse of B(z), its derivative, and n(z)
// Parameters: dt[us], dB[G], Energy[keV], Bbg[G], width[us^{-1}], weight[1]
//-----------
void TPofBCalc::Calculate(const TBofZCalcInverse *BofZ, const TTrimSPData *dataTrimSP, const vector<double> &para) {
if(fPBExists)
return;
fBmin = BofZ->GetBmin();
fBmax = BofZ->GetBmax();
int a1(static_cast<int>(floor(fBmin/fDB)));
int a2(static_cast<int>(ceil(fBmin/fDB)));
int a3(static_cast<int>(floor(fBmax/fDB)));
int a4(static_cast<int>(ceil(fBmax/fDB)));
unsigned int firstZerosEnd ((a1 < a2) ? a1 : ((a1 > 0) ? (a1 - 1) : 0));
unsigned int lastZerosStart ((a3 < a4) ? a4 : (a4 + 1));
if (lastZerosStart >= fPBSize) {
lastZerosStart = fPBSize - 1;
}
unsigned int i;
// calculate p(B) from the inverse of B(z)
for (i = firstZerosEnd; i <= lastZerosStart; ++i) {
vector< pair<double, double> > inv;
inv = BofZ->GetInverseAndDerivative(fB[i]);
for (unsigned int j(0); j < inv.size(); ++j) {
fPB[i] += dataTrimSP->GetNofZ(inv[j].first, para[2])*fabs(inv[j].second);
}
// if (fPB[i])
// cout << fB[i] << " " << fPB[i] << endl;
}
// normalize p(B)
Normalize(firstZerosEnd, lastZerosStart);
if(para.size() == 6 && para[5] != 0.0)
AddBackground(para[3], para[4], para[5]);
}
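// In the method above p(B) follows directly from the analytical inverse of B(z):
//   p(B) = sum_i n(z_i(B)) * |dz_i/dB| ,
// where the sum runs over all depths z_i with B(z_i) = B, and n(z) is the implantation
// profile (TrimSP data) for the energy para[2]; the optional parameters para[3..5] add a
// Gaussian background contribution to p(B).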
//-----------
// Calculate method that computes P(B) from a given B(z) and n(z)
// Parameters: dt[us], dB[G], Energy[keV]
//-----------
void TPofBCalc::Calculate(const TBofZCalc *BofZ, const TTrimSPData *dataTrimSP, const vector<double> &para, unsigned int zonk) {
if(fPBExists)
return;
fBmin = BofZ->GetBmin();
fBmax = BofZ->GetBmax();
int a1(static_cast<int>(floor(fBmin/fDB)));
int a2(static_cast<int>(ceil(fBmin/fDB)));
int a3(static_cast<int>(floor(fBmax/fDB)));
int a4(static_cast<int>(ceil(fBmax/fDB)));
unsigned int firstZerosEnd ((a1 < a2) ? a1 : ((a1 > 0) ? (a1 - 1) : 0));
unsigned int lastZerosStart ((a3 < a4) ? a4 : (a4 + 1));
double BB, BBnext;
double zm, zp, zNext, dz;
// calculate p(B) from B(z)
vector<double> *bofzZ = BofZ->DataZ();
vector<double> *bofzBZ = BofZ->DataBZ();
double ddZ(BofZ->GetDZ());
/* USED FOR DEBUGGING-----------------------------------
cout << "Bmin = " << fBmin << ", Bmax = " << fBmax << endl;
time_t seconds;
seconds = time (NULL);
char debugfile[50];
int n = sprintf (debugfile, "test_Bz_%ld_%f.dat", seconds, fBmin);
if (n > 0) {
ofstream of(debugfile);
// assure(of, debugfile);
for (unsigned int i(0); i<bofzZ.size(); i++) {
of << bofzZ[i] << " " << bofzBZ[i] << endl;
}
of.close();
}
char debugfile1[50];
int n1 = sprintf (debugfile1, "test_NZ_%ld_%f.dat", seconds, para[2]);
char debugfile2[50];
int n2 = sprintf (debugfile2, "test_NZgss_%ld_%f.dat", seconds, para[2]);
if (n1 > 0) {
ofstream of1(debugfile1);
// assure(of1, debugfile1);
dataTrimSP.Normalize(para[2]);
for (unsigned int i(0); i<dataTrimSP.DataZ(para[2]).size(); i++) {
of1 << dataTrimSP.DataZ(para[2])[i] << " " << dataTrimSP.DataNZ(para[2])[i] << " " << dataTrimSP.OrigDataNZ(para[2])[i] << endl;
}
of1.close();
}
if (n2 > 0) {
ofstream of2(debugfile2);
// assure(of1, debugfile1);
dataTrimSP.ConvolveGss(10.0,para[2]);
dataTrimSP.Normalize(para[2]);
for (unsigned int i(0); i<dataTrimSP.DataZ(para[2]).size(); i++) {
of2 << dataTrimSP.DataZ(para[2])[i] << " " << dataTrimSP.DataNZ(para[2])[i] << " " << dataTrimSP.OrigDataNZ(para[2])[i] << endl;
}
of2.close();
}
/---------------------------------------------------------*/
// dataTrimSP.ConvolveGss(10.0,para[2]); // convolve implantation profile by gaussian
double nn;
bool zNextFound(false);
unsigned int i;
for (i = 0; i <= lastZerosStart; i++) {
BB = fB[i];
BBnext = fB[i+1];
for ( unsigned int j(0); j < bofzZ->size() - 1; j++ ) {
if ( (*bofzBZ)[j] >= BB && (*bofzBZ)[j+1] <= BB ) {
zm = (BB-(*bofzBZ)[j])*ddZ/((*bofzBZ)[j+1]-(*bofzBZ)[j]) + (*bofzZ)[j];
for (unsigned int k(0); k < j; k++) {
if ( ( (*bofzBZ)[j-k] <= BBnext && (*bofzBZ)[j-k-1] >= BBnext ) ) {
// cout << "1 " << j << " " << k << endl;
zNext = (BBnext-(*bofzBZ)[j-k-1])*ddZ/((*bofzBZ)[j-k]-(*bofzBZ)[j-k-1]) + (*bofzZ)[j-k-1];
zNextFound = true;
break;
}
}
if(zNextFound) {
zNextFound = false;
dz = zNext-zm;
nn = dataTrimSP->GetNofZ(zm, para[2]);
if (nn != -1.0) {
// cout << "zNext = " << zNextm << ", zm = " << zm << ", dz = " << dz << endl;
fPB[i] += nn*fabs(dz/fDB);
}
}
} else if ((*bofzBZ)[j] <= BB && (*bofzBZ)[j+1] >= BB ) {
zp = (BB-(*bofzBZ)[j])*ddZ/((*bofzBZ)[j+1]-(*bofzBZ)[j]) + (*bofzZ)[j];
for (unsigned int k(0); k < bofzZ->size() - j - 1; k++) {
if ( ( (*bofzBZ)[j+k] <= BBnext && (*bofzBZ)[j+k+1] >= BBnext ) ) {
// cout << "2 " << j << " " << k << endl;
zNext = (BBnext-(*bofzBZ)[j+k])*ddZ/((*bofzBZ)[j+k+1]-(*bofzBZ)[j+k]) + (*bofzZ)[j+k];
zNextFound = true;
break;
}
}
if(zNextFound) {
zNextFound = false;
dz = zNext-zp;
nn = dataTrimSP->GetNofZ(zp, para[2]);
if (nn != -1.0) {
// cout << "zNext = " << zNextp << ", zp = " << zp << ", dz = " << dz << endl;
fPB[i] += nn*fabs(dz/fDB);
}
}
}
}
}
bofzZ = 0;
bofzBZ = 0;
// normalize p(B)
Normalize(firstZerosEnd, lastZerosStart);
fPBExists = true;
return;
}
//-----------
// Calculate method that does the P(B) calculation for a bulk vortex lattice
// Parameters: dt[us], dB[G] [, Bbg[G], width[us^{-1}], weight[1] ]
//-----------
void TPofBCalc::Calculate(const TBulkVortexFieldCalc *vortexLattice, const vector<double> &para) {
if(fPBExists)
return;
fBmin = vortexLattice->GetBmin();
fBmax = vortexLattice->GetBmax();
// int a1(static_cast<int>(floor(fBmin/fDB)));
// int a2(static_cast<int>(ceil(fBmin/fDB)));
int a3(static_cast<int>(floor(fBmax/fDB)));
int a4(static_cast<int>(ceil(fBmax/fDB)));
//unsigned int firstZerosEnd ((a1 < a2) ? a1 : ((a1 > 0) ? (a1 - 1) : 0));
unsigned int lastZerosStart ((a3 < a4) ? a4 : (a4 + 1));
unsigned int numberOfSteps(vortexLattice->GetNumberOfSteps());
unsigned int numberOfStepsSq(numberOfSteps*numberOfSteps);
unsigned int numberOfSteps_2(numberOfSteps/2);
//unsigned int numberOfStepsSq_2(numberOfStepsSq/2);
if (lastZerosStart >= fPBSize)
lastZerosStart = fPBSize - 1;
// cout << endl << fBmin << " " << fBmax << " " << firstZerosEnd << " " << lastZerosStart << " " << numberOfSteps << endl;
if (!vortexLattice->GridExists()) {
vortexLattice->CalculateGrid();
}
double *vortexFields = vortexLattice->DataB();
unsigned int fill_index;
if (para.size() == 7 && para[6] == 1.0 && para[5] != 0.0 && vortexLattice->IsTriangular()) {
// weight distribution with Gaussian around vortex-cores
double Rsq1, Rsq2, Rsq3, Rsq4, Rsq5, Rsq6, sigmaSq(-0.5*para[5]*para[5]);
for (unsigned int j(0); j < numberOfSteps_2; ++j) {
for (unsigned int i(0); i < numberOfSteps_2; ++i) {
fill_index = static_cast<unsigned int>(ceil(fabs((vortexFields[i + numberOfSteps*j]/fDB))));
if (fill_index < fPBSize) {
Rsq1 = static_cast<double>(3*i*i + j*j)/static_cast<double>(numberOfStepsSq);
Rsq2 = static_cast<double>(3*(numberOfSteps_2 - i)*(numberOfSteps_2 - i) \
+ (numberOfSteps_2 - j)*(numberOfSteps_2 - j))/static_cast<double>(numberOfStepsSq);
Rsq3 = static_cast<double>(3*(numberOfSteps - i)*(numberOfSteps - i) \
+ j*j)/static_cast<double>(numberOfStepsSq);
Rsq4 = static_cast<double>(3*(numberOfSteps_2 - i)*(numberOfSteps_2 - i) \
+ (numberOfSteps_2 + j)*(numberOfSteps_2 + j))/static_cast<double>(numberOfStepsSq);
Rsq5 = static_cast<double>(3*i*i \
+ (numberOfSteps - j)*(numberOfSteps - j))/static_cast<double>(numberOfStepsSq);
Rsq6 = static_cast<double>(3*(numberOfSteps_2 + i)*(numberOfSteps_2 + i) \
+ (numberOfSteps_2 - j)*(numberOfSteps_2 - j))/static_cast<double>(numberOfStepsSq);
fPB[fill_index] += exp(sigmaSq*Rsq1) + exp(sigmaSq*Rsq2) + exp(sigmaSq*Rsq3) \
+ exp(sigmaSq*Rsq4) + exp(sigmaSq*Rsq5) + exp(sigmaSq*Rsq6);
}
}
}
} else if (para.size() == 7 && para[6] == 2.0 && para[5] != 0.0 && vortexLattice->IsTriangular()) {
// weight distribution with Lorentzian around vortex-cores
double Rsq1, Rsq2, Rsq3, Rsq4, Rsq5, Rsq6, sigmaSq(para[5]*para[5]);
// ofstream of("LorentzWeight.dat");
for (unsigned int j(0); j < numberOfSteps_2; ++j) {
for (unsigned int i(0); i < numberOfSteps_2; ++i) {
fill_index = static_cast<unsigned int>(ceil(fabs((vortexFields[i + numberOfSteps*j]/fDB))));
if (fill_index < fPBSize) {
Rsq1 = static_cast<double>(3*i*i + j*j)/static_cast<double>(numberOfStepsSq);
Rsq2 = static_cast<double>(3*(numberOfSteps_2 - i)*(numberOfSteps_2 - i) \
+ (numberOfSteps_2 - j)*(numberOfSteps_2 - j))/static_cast<double>(numberOfStepsSq);
Rsq3 = static_cast<double>(3*(numberOfSteps - i)*(numberOfSteps - i) \
+ j*j)/static_cast<double>(numberOfStepsSq);
Rsq4 = static_cast<double>(3*(numberOfSteps_2 - i)*(numberOfSteps_2 - i) \
+ (numberOfSteps_2 + j)*(numberOfSteps_2 + j))/static_cast<double>(numberOfStepsSq);
Rsq5 = static_cast<double>(3*i*i \
+ (numberOfSteps - j)*(numberOfSteps - j))/static_cast<double>(numberOfStepsSq);
Rsq6 = static_cast<double>(3*(numberOfSteps_2 + i)*(numberOfSteps_2 + i) \
+ (numberOfSteps_2 - j)*(numberOfSteps_2 - j))/static_cast<double>(numberOfStepsSq);
fPB[fill_index] += 1.0/(1.0+sigmaSq*Rsq1) + 1.0/(1.0+sigmaSq*Rsq2) + 1.0/(1.0+sigmaSq*Rsq3) \
+ 1.0/(1.0+sigmaSq*Rsq4) + 1.0/(1.0+sigmaSq*Rsq5) + 1.0/(1.0+sigmaSq*Rsq6);
/*
of << 1.0/(1.0+sigmaSq*Rsq1) + 1.0/(1.0+sigmaSq*Rsq2) + 1.0/(1.0+sigmaSq*Rsq3) \
+ 1.0/(1.0+sigmaSq*Rsq4) + 1.0/(1.0+sigmaSq*Rsq5) + 1.0/(1.0+sigmaSq*Rsq6) << " ";
*/
}
}
// of << endl;
}
// of.close();
} else if (para.size() == 8 && para[6] == 3.0 && para[5] != 0.0 && vortexLattice->IsTriangular()) {
// add antiferromagnetic fields in the vortex cores
double field, Rsq1, Rsq2, Rsq3, Rsq4, Rsq5, Rsq6, one_xiSq(-1.0/(para[7]*para[7]));
// ofstream of("AFfields.dat");
for (unsigned int j(0); j < numberOfSteps_2; ++j) {
for (unsigned int i(0); i < numberOfSteps_2; ++i) {
Rsq1 = static_cast<double>(3*i*i + j*j)/static_cast<double>(numberOfStepsSq);
Rsq2 = static_cast<double>(3*(numberOfSteps_2 - i)*(numberOfSteps_2 - i) \
+ (numberOfSteps_2 - j)*(numberOfSteps_2 - j))/static_cast<double>(numberOfStepsSq);
Rsq3 = static_cast<double>(3*(numberOfSteps - i)*(numberOfSteps - i) \
+ j*j)/static_cast<double>(numberOfStepsSq);
Rsq4 = static_cast<double>(3*(numberOfSteps_2 - i)*(numberOfSteps_2 - i) \
+ (numberOfSteps_2 + j)*(numberOfSteps_2 + j))/static_cast<double>(numberOfStepsSq);
Rsq5 = static_cast<double>(3*i*i \
+ (numberOfSteps - j)*(numberOfSteps - j))/static_cast<double>(numberOfStepsSq);
Rsq6 = static_cast<double>(3*(numberOfSteps_2 + i)*(numberOfSteps_2 + i) \
+ (numberOfSteps_2 - j)*(numberOfSteps_2 - j))/static_cast<double>(numberOfStepsSq);
field = vortexFields[i + numberOfSteps*j] \
+ para[5]*(exp(Rsq1*one_xiSq) + exp(Rsq2*one_xiSq) + exp(Rsq3*one_xiSq) \
+exp(Rsq4*one_xiSq) + exp(Rsq5*one_xiSq) + exp(Rsq6*one_xiSq));
/*
of << para[5]*(exp(Rsq1*one_xiSq) - exp(Rsq2*one_xiSq) + exp(Rsq3*one_xiSq) \
-exp(Rsq4*one_xiSq) + exp(Rsq5*one_xiSq) - exp(Rsq6*one_xiSq)) << " ";
*/
fill_index = static_cast<unsigned int>(ceil(fabs((field/fDB))));
if (fill_index < fPBSize) {
fPB[fill_index] += 1.0;
} else {
cout << "Field over the limit..." << endl;
}
}
// of << endl;
}
// of.close();
} else {
int i,j;
#ifdef HAVE_GOMP
// cannot use a reduction clause here (like e.g. in Normalize()), since pBvec[] is not a scalar variable
// therefore, we need to work on it a bit more
int n(omp_get_num_procs()), tid, offset;
vector< vector<unsigned int> > pBvec(n, vector<unsigned int>(fPBSize, 0));
int indexStep(static_cast<int>(floor(static_cast<float>(numberOfSteps_2)/static_cast<float>(n))));
#pragma omp parallel private(tid, i, j, offset, fill_index) num_threads(n)
{
tid = omp_get_thread_num();
offset = tid*indexStep;
if (tid == n-1) {
for (j = offset; j < static_cast<int>(numberOfSteps_2); ++j) {
for (i = 0; i < static_cast<int>(numberOfSteps_2); ++i) {
fill_index = static_cast<unsigned int>(ceil(fabs((vortexFields[i + numberOfSteps*j]/fDB))));
if (fill_index < fPBSize) {
pBvec[tid][fill_index] += 1;
}
}
}
} else {
for (j = 0; j < indexStep; ++j) {
for (i = 0; i < static_cast<int>(numberOfSteps_2); ++i) {
fill_index = static_cast<unsigned int>(ceil(fabs((vortexFields[offset + i + numberOfSteps*j]/fDB))));
if (fill_index < fPBSize) {
pBvec[tid][fill_index] += 1;
}
}
}
}
}
for (j = 0; j < n; ++j) {
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
for (i = 0; i < static_cast<int>(fPBSize); ++i) {
fPB[i] += static_cast<double>(pBvec[j][i]);
}
pBvec[j].clear();
}
pBvec.clear();
#else
for (j = 0; j < static_cast<int>(numberOfSteps_2); ++j) {
for (i = 0; i < static_cast<int>(numberOfSteps_2); ++i) {
fill_index = static_cast<unsigned int>(ceil(fabs((vortexFields[i + numberOfSteps*j]/fDB))));
if (fill_index < fPBSize) {
fPB[fill_index] += 1.0;
}
}
}
#endif
}
vortexFields = 0;
// normalize P(B)
Normalize();
if(para.size() == 5)
AddBackground(para[2], para[3], para[4]);
fPBExists = true;
return;
}
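// Summary of the vortex-lattice branch above: the field map calculated by TBulkVortexFieldCalc
// on a quarter of the unit cell is histogrammed into bins of width fDB to obtain p(B).
// The optional parameters weight each lattice point by Gaussian (para[6]==1.0) or Lorentzian
// (para[6]==2.0) functions of the scaled distances to the surrounding vortex cores, or add an
// extra field of magnitude para[5] that falls off like a Gaussian of width para[7] away from
// the cores (para[6]==3.0); without these options the plain histogramming is parallelized if
// OpenMP is available.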
// TPofBCalc::AddBackground, Parameters: field B[G], width s[us^{-1}], weight w
void TPofBCalc::AddBackground(double B, double s, double w) {
if(!s || w<0.0 || w>1.0 || B<0.0)
return;
int i;
double BsSq(s*s/(gBar*gBar*4.0*pi*pi));
// calculate Gaussian background
vector<double> bg(fPBSize); // use a vector here since fPBSize is not a compile-time constant (VLAs are not standard C++)
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for(i = 0; i < static_cast<int>(fPBSize); ++i) {
bg[i] = exp(-(fB[i]-B)*(fB[i]-B)/(2.0*BsSq));
}
// normalize background
double bgsum(0.0);
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic) reduction(+:bgsum)
#endif
for (i = 0; i < static_cast<int>(fPBSize); ++i)
bgsum += bg[i];
bgsum *= fDB;
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i = 0; i < static_cast<int>(fPBSize); ++i)
bg[i] /= bgsum;
// add background to P(B)
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i = 0; i < static_cast<int>(fPBSize); ++i)
fPB[i] = (1.0 - w)*fPB[i] + w*bg[i];
// // check if normalization is still valid
// double pBsum(0.0);
// for (unsigned int i(0); i < sizePB; i++)
// pBsum += fPB[i];
//
// cout << "pBsum = " << pBsum << endl;
}
void TPofBCalc::ConvolveGss(double w) {
if(!w)
return;
unsigned int NFFT(fPBSize);
double TBin;
fftw_plan FFTplanToTimeDomain;
fftw_plan FFTplanToFieldDomain;
fftw_complex *FFTout;
TBin = 1.0/(gBar*static_cast<double>(NFFT-1)*fDB);
FFTout = new fftw_complex[NFFT/2 + 1]; //(fftw_complex *)fftw_malloc(sizeof(fftw_complex) * (NFFT/2+1));
// do the FFT to time domain
FFTplanToTimeDomain = fftw_plan_dft_r2c_1d(NFFT, fPB, FFTout, FFTW_ESTIMATE);
fftw_execute(FFTplanToTimeDomain);
// multiply everything by a gaussian
double GssInTimeDomain;
double expo(-2.0*PI*PI*gBar*gBar*w*w*TBin*TBin);
int i;
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(GssInTimeDomain, i) schedule(dynamic)
#endif
for (i = 0; i < static_cast<int>(NFFT/2+1); ++i) {
GssInTimeDomain = exp(expo*static_cast<double>(i*i));
FFTout[i][0] *= GssInTimeDomain;
FFTout[i][1] *= GssInTimeDomain;
}
// FFT back to the field domain
FFTplanToFieldDomain = fftw_plan_dft_c2r_1d(NFFT, FFTout, fPB, FFTW_ESTIMATE);
fftw_execute(FFTplanToFieldDomain);
// cleanup
fftw_destroy_plan(FFTplanToTimeDomain);
fftw_destroy_plan(FFTplanToFieldDomain);
delete[] FFTout; // fftw_free(FFTout);
FFTout = 0;
// fftw_cleanup();
// normalize p(B)
Normalize();
return;
}
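// ConvolveGss above performs the convolution of p(B) with a Gaussian through the Fourier
// domain: p(B) is transformed to the time domain, multiplied by exp(-(2*pi*gBar*w*t)^2/2)
// (presumably corresponding to a Gaussian of width w in Gauss in the field domain), and
// transformed back; since FFTW's backward transform is unnormalized, the final Normalize()
// call restores the proper scale of p(B).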
double TPofBCalc::GetFirstMoment() const {
int i;
double pBsum(0.0);
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic) reduction(+:pBsum)
#endif
for (i = 0; i < static_cast<int>(fPBSize); ++i)
pBsum += fB[i]*fPB[i];
pBsum *= fDB;
return pBsum;
}
double TPofBCalc::GetCentralMoment(unsigned int n) const {
double firstMoment(GetFirstMoment());
double diff;
int i;
double pBsum(0.0);
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i, diff) schedule(dynamic) reduction(+:pBsum)
#endif
for (i = 0; i < static_cast<int>(fPBSize); ++i) {
diff = fB[i]-firstMoment;
pBsum += pow(diff, static_cast<double>(n))*fPB[i];
}
pBsum *= fDB;
return pBsum;
}
double TPofBCalc::GetSkewnessAlpha() const {
double M2(GetCentralMoment(2));
double M3(GetCentralMoment(3));
return M3 > 0.0 ? pow(M3, 1.0/3.0)/pow(M2, 0.5) : -pow(-M3, 1.0/3.0)/pow(M2, 0.5);
}
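/* Usage sketch (illustrative only, with placeholder values; this is essentially what
   TSkewedGss, shown further below, does internally):

   vector<double> parForPofB;           // dt [us], dB [G], nu0, sigma-, sigma+
   parForPofB.push_back(0.01);          // dt
   parForPofB.push_back(0.05);          // dB
   parForPofB.push_back(20.0);          // nu0
   parForPofB.push_back(0.3);           // sigma-
   parForPofB.push_back(0.5);           // sigma+

   TPofBCalc pofB(parForPofB);          // sets up the field grid from dt and dB
   pofB.Calculate("skg", parForPofB);   // fills the skewed-Gaussian p(B)

   vector<double> parForPofT;           // phase [deg], dt, dB
   parForPofT.push_back(0.0);
   parForPofT.push_back(0.01);
   parForPofT.push_back(0.05);

   TPofTCalc pofT(&pofB, "WordsOfWisdom.dat", parForPofT); // wisdom file name here is just a placeholder
   pofT.DoFFT();
   pofT.CalcPol(parForPofT);
   double pol = pofT.Eval(1.0);         // polarization at t = 1 us
*/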


@@ -0,0 +1,437 @@
/***************************************************************************
TPofTCalc.cpp
Author: Bastian M. Wojek
e-mail: bastian.wojek@psi.ch
2008/11/16
***************************************************************************/
/***************************************************************************
TPofTCalc::FakeData Method based on Andreas Suter's fakeData
***************************************************************************/
/***************************************************************************
* Copyright (C) 2008-2009 by Bastian M. Wojek, Andreas Suter *
* *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "TPofTCalc.h"
#include "fftw3.h"
#include <cmath>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#ifdef HAVE_GOMP
#include <omp.h>
#endif
#include <TString.h>
#include <TObjArray.h>
#include <TObjString.h>
#include <TFile.h>
#include <TH1F.h>
#include <TFolder.h>
#include "TLemRunHeader.h"
/* USED FOR DEBUGGING -----------------------*/
#include <ctime>
#include <fstream>
/*--------------------------------------------*/
//------------------
// Constructor of the TPofTCalc class - it creates the FFT plan
// Parameters: phase, dt, dB
//------------------
TPofTCalc::TPofTCalc (const TPofBCalc *PofB, const string &wisdom, const vector<double> &par) : fWisdom(wisdom) {
#ifdef HAVE_LIBFFTW3_THREADS
int init_threads(fftw_init_threads());
if (!init_threads)
cout << "TPofTCalc::TPofTCalc: Couldn't initialize multiple FFTW-threads ..." << endl;
else {
#ifdef HAVE_GOMP
fftw_plan_with_nthreads(omp_get_num_procs());
#else
fftw_plan_with_nthreads(2);
#endif /* HAVE_GOMP */
}
#endif /* HAVE_LIBFFTW3_THREADS */
fNFFT = static_cast<int>(1.0/(gBar*par[1]*par[2]));
if (fNFFT % 2) {
fNFFT += 1;
} else {
fNFFT += 2;
}
fTBin = 1.0/(gBar*double(fNFFT-1)*par[2]);
const int NFFT_2p1(fNFFT/2 + 1);
// allocating memory for the time- and polarisation vectors
fT = new double[NFFT_2p1]; //static_cast<double *>(malloc(sizeof(double) * NFFT_2p1));
fPT = new double[NFFT_2p1]; //static_cast<double *>(malloc(sizeof(double) * NFFT_2p1));
int i;
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i = 0; i < NFFT_2p1; i++) {
fT[i] = static_cast<double>(i)*fTBin;
}
fFFTin = PofB->DataPB();
fFFTout = new fftw_complex[NFFT_2p1]; //static_cast<fftw_complex *>(fftw_malloc(sizeof(fftw_complex) * NFFT_2p1));
// Load wisdom from file if it exists and should be used
fUseWisdom = true;
int wisdomLoaded(0);
FILE *wordsOfWisdomR;
wordsOfWisdomR = fopen(wisdom.c_str(), "r");
if (wordsOfWisdomR == NULL) {
fUseWisdom = false;
} else {
wisdomLoaded = fftw_import_wisdom_from_file(wordsOfWisdomR);
fclose(wordsOfWisdomR);
}
if (!wisdomLoaded) {
fUseWisdom = false;
}
// create the FFT plan
if (fUseWisdom)
fFFTplan = fftw_plan_dft_r2c_1d(fNFFT, fFFTin, fFFTout, FFTW_EXHAUSTIVE);
else
fFFTplan = fftw_plan_dft_r2c_1d(fNFFT, fFFTin, fFFTout, FFTW_ESTIMATE);
}
//---------------------
// Destructor of the TPofTCalc class - it saves the FFT plan and cleans up
//---------------------
TPofTCalc::~TPofTCalc() {
// if a wisdom file is used, export the wisdom so that the FFT plan does not have to be optimized again next time
if (fUseWisdom) {
FILE *wordsOfWisdomW;
wordsOfWisdomW = fopen(fWisdom.c_str(), "w");
if (wordsOfWisdomW == NULL) {
cout << "TPofTCalc::~TPofTCalc(): Could not open file ... No wisdom is exported..." << endl;
} else {
fftw_export_wisdom_to_file(wordsOfWisdomW);
fclose(wordsOfWisdomW);
}
}
// clean up
fftw_destroy_plan(fFFTplan);
delete[] fFFTout; //fftw_free(fFFTout);
fFFTout = 0;
// fftw_cleanup();
// fftw_cleanup_threads();
delete[] fT;
fT = 0;
delete[] fPT;
fPT = 0;
}
//--------------
// Method that does the FFT of a given p(B)
//--------------
void TPofTCalc::DoFFT() {
fftw_execute(fFFTplan);
}
//---------------------
// Method for calculating the muon spin polarization P(t) from the Fourier transformed p(B)
// Parameters: phase, dt, dB
//---------------------
void TPofTCalc::CalcPol(const vector<double> &par) {
double sinph(sin(par[0]*PI/180.0)), cosph(cos(par[0]*PI/180.0));
int i;
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(i) schedule(dynamic)
#endif
for (i=0; i<fNFFT/2+1; i++){
fPT[i] = (cosph*fFFTout[i][0] + sinph*fFFTout[i][1])*par[2];
}
}
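// CalcPol above combines the real and imaginary parts of the FFT of p(B) such that
//   P(t_k) ~ sum_j p(B_j) * cos(2*pi*gBar*B_j*t_k + phase) * dB ,
// i.e. the discretized form of P(t) = int p(B) cos(gamma_mu*B*t + phi) dB (assuming
// FFTW's default r2c sign convention); the factor par[2] = dB provides the weight of
// the discrete sum.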
//---------------------
// Method for evaluating P(t) at a given t
//---------------------
double TPofTCalc::Eval(double t) const {
int i(static_cast<int>(t/fTBin));
if (i < fNFFT/2){
return fPT[i]+(fPT[i+1]-fPT[i])/(fT[i+1]-fT[i])*(t-fT[i]);
}
cout << "TPofTCalc::Eval: No data for the time " << t << " us available! Returning -999.0 ..." << endl;
return -999.0;
}
//---------------------
// Method for generating fake LEM decay histograms from p(B)
// Parameters: output filename, par(dt, dB, timeres, channels, asyms, phases, t0s, N0s, bgs) optPar(field, energy)
//---------------------
void TPofTCalc::FakeData(const string &rootOutputFileName, const vector<double> &par, const vector<double> *optPar = 0) {
//determine the number of histograms to be built
unsigned int numHist(0);
if(!((par.size()-4)%5))
numHist=(par.size()-4)/5;
if(!numHist){
cout << "TPofTCalc::FakeData: The number of parameters for the histogram creation is not correct. Do nothing." << endl;
return;
}
cout << "TPofTCalc::FakeData: " << numHist << " histograms to be built" << endl;
int nChannels = int(par[3]);
vector<int> t0;
vector<double> asy0;
vector<double> phase0;
vector<double> N0;
vector<double> bg;
for(unsigned int i(0); i<numHist; i++) {
t0.push_back(int(par[i+4+numHist*2]));
asy0.push_back(par[i+4]);
phase0.push_back(par[i+4+numHist]);
N0.push_back(par[i+4+numHist*3]);
bg.push_back(par[i+4+numHist*4]);
}
vector<double> param; // Parameters for TPofTCalc::CalcPol
param.push_back(0.0); // phase
param.push_back(par[0]); // dt
param.push_back(par[1]); // dB
vector< vector<double> > asy;
vector<double> asydata(nChannels);
double ttime;
int j,k;
for(unsigned int i(0); i<numHist; i++) {
param[0]=phase0[i];
// calculate asymmetry
CalcPol(param);
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(j,ttime,k) schedule(dynamic)
#endif
for(j=0; j<nChannels; j++) {
ttime=j*par[2];
k = static_cast<int>(floor(ttime/fTBin));
asydata[j]=asy0[i]*(fPT[k]+(fPT[k+1]-fPT[k])/fTBin*(ttime-fT[k]));
}
// end omp
// for(unsigned int k(0); k<fT.size()-1; k++){
// if (ttime < fT[k+1]) {
// pol=fPT[k]+(fPT[k+1]-fPT[k])/(fT[k+1]-fT[k])*(ttime-fT[k]);
// asydata.push_back(asy0[i]*pol);
// break;
// }
// }
asy.push_back(asydata);
// asydata.clear();
cout << "TPofTCalc::FakeData: " << i+1 << "/" << numHist << " calculated!" << endl;
}
// calculate the histograms
vector< vector<double> > histo;
vector<double> data(nChannels);
for (unsigned int i(0); i<numHist; i++) { // loop over all histos
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(j) schedule(dynamic)
#endif
for (j = 0; j<nChannels; j++) { // loop over time
if (j < t0[i]) // j<t0
data[j] = bg[i]; // background
else
data[j] = N0[i]*exp(-par[2]*static_cast<double>(j-t0[i])/tauMu)*(1.0+asy[i][j-t0[i]])+bg[i];
}
// end omp
histo.push_back(data);
cout << "TPofTCalc::FakeData: " << i+1 << "/" << numHist << " done ..." << endl;
}
// add Poisson noise to the histograms
cout << "TPofTCalc::FakeData: Adding Poisson noise ..." << endl;
TH1F* theoHisto;
TH1F* fakeHisto;
vector<TH1F*> histoData;
TString name;
for (unsigned int i(0); i<numHist; i++) { // loop over all histos
// create histos
name = "theoHisto";
name += i;
theoHisto = new TH1F(name.Data(), name.Data(), int(par[3]), -par[2]/2.0, (par[3]+0.5)*par[2]);
if (i < 10)
name = "hDecay0";
else
name = "hDecay";
name += i;
fakeHisto = new TH1F(name.Data(), name.Data(), int(par[3]), -par[2]/2.0, (par[3]+0.5)*par[2]);
// fill theoHisto
#ifdef HAVE_GOMP
#pragma omp parallel for default(shared) private(j) schedule(dynamic)
#endif
for (j = 0; j<nChannels; j++)
theoHisto->SetBinContent(j, histo[i][j]);
// end omp
// fill fakeHisto
fakeHisto->FillRandom(theoHisto, (int)theoHisto->Integral());
// keep fake data
histoData.push_back(fakeHisto);
// cleanup
if (theoHisto) {
delete theoHisto;
theoHisto = 0;
}
}
cout << "TPofTCalc::FakeData: Write histograms and header information to the file ..." << endl;
// save the histograms as root files
// create run info folder and content
TFolder *runInfoFolder = new TFolder("RunInfo", "Run Info");
TLemRunHeader *runHeader = new TLemRunHeader();
//sprintf(str, "Fake Data generated from %s", pBFileName.Data());
runHeader->SetRunTitle("Fake Data");
if (optPar && (optPar->size() > 1)) { // set energy and field if they were specified
runHeader->SetImpEnergy((*optPar)[1]);
runHeader->SetSampleBField((*optPar)[0], 0.0f);
}
float fval = par[2]*1000.; //us->ns
runHeader->SetTimeResolution(fval);
runHeader->SetNChannels(nChannels);
runHeader->SetNHist(histoData.size());
double *t0array = new double[histoData.size()];
for (unsigned int i(0); i<histoData.size(); i++)
t0array[i] = t0[i];
runHeader->SetTimeZero(t0array);
if (t0array) {
delete[] t0array;
t0array = 0;
}
runInfoFolder->Add(runHeader);
// create decay histo folder and content
TFolder *histoFolder = new TFolder("histos", "histos");
TFolder *decayAnaModule = new TFolder("DecayAnaModule", "DecayAnaModule");
histoFolder->Add(decayAnaModule);
// no post pileup corrected (NPP)
for (unsigned int i(0); i<histoData.size(); i++)
decayAnaModule->Add(histoData[i]);
// post pileup corrected (PPC)
vector<TH1F*> histoDataPPC;
for (unsigned int i(0); i<histoData.size(); i++) {
histoDataPPC.push_back(dynamic_cast<TH1F*>(histoData[i]->Clone()));
if (i < 10)
name = "hDecay2";
else
name = "hDecay";
name += i;
histoDataPPC[i]->SetNameTitle(name.Data(), name.Data());
decayAnaModule->Add(histoDataPPC[i]);
}
// write file
TFile fdf(rootOutputFileName.c_str(), "recreate");
runInfoFolder->Write("RunInfo", TObject::kSingleKey);
histoFolder->Write();
fdf.Close();
// clean up
for (unsigned int i(0); i<histo.size(); i++) {
asy[i].clear();
histo[i].clear();
}
asy.clear();
histo.clear();
for (unsigned int i(0); i<histoData.size(); i++) {
delete histoData[i];
delete histoDataPPC[i];
}
histoData.clear();
histoDataPPC.clear();
fakeHisto = 0;
delete histoFolder; histoFolder = 0;
delete decayAnaModule; decayAnaModule = 0;
delete runInfoFolder; runInfoFolder = 0;
delete runHeader; runHeader = 0;
t0.clear();
asy0.clear();
phase0.clear();
N0.clear();
bg.clear();
cout << "TPofTCalc::FakeData: DONE." << endl << endl;
return;
}
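/* Sketch of a FakeData parameter vector for a single histogram (placeholder values only),
   following the ordering documented above -- par(dt, dB, timeres, channels, asym, phase, t0, N0, bg):

   vector<double> par;
   par.push_back(0.01);         // dt [us]
   par.push_back(0.05);         // dB [G]
   par.push_back(0.0001953125); // time resolution [us]
   par.push_back(66601.0);      // number of channels
   par.push_back(0.26);         // asymmetry
   par.push_back(0.0);          // phase [deg]
   par.push_back(3300.0);       // t0 [channel]
   par.push_back(25.0);         // N0
   par.push_back(0.05);         // constant background

   pofT.FakeData("fakedata.root", par, 0); // pofT: a TPofTCalc object constructed with matching dt and dB
*/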


@@ -0,0 +1,173 @@
/***************************************************************************
TSkewedGss.cpp
Author: Bastian M. Wojek
e-mail: bastian.wojek@psi.ch
$Id: TSkewedGss.cpp 4800 2011-03-16 16:43:13Z l_wojek $
***************************************************************************/
/***************************************************************************
* Copyright (C) 2009 by Bastian M. Wojek *
* *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#include "TSkewedGss.h"
#include <iostream>
#include <cassert>
using namespace std;
#include <TSAXParser.h>
#include "BMWStartupHandler.h"
ClassImp(TSkewedGss)
//------------------
// Destructor of the TSkewedGss class -- cleaning up
//------------------
TSkewedGss::~TSkewedGss() {
fPar.clear();
fParForPofB.clear();
fParForPofT.clear();
delete fPofB;
fPofB = 0;
delete fPofT;
fPofT = 0;
}
//------------------
// Constructor of the TSkewedGss class -- creates (a pointer to) the TPofTCalc object (with the FFT plan)
//------------------
TSkewedGss::TSkewedGss() : fCalcNeeded(true), fFirstCall(true) {
// read startup file
string startup_path_name("BMW_startup.xml");
TSAXParser *saxParser = new TSAXParser();
BMWStartupHandler *startupHandler = new BMWStartupHandler();
saxParser->ConnectToHandler("BMWStartupHandler", startupHandler);
int status (saxParser->ParseFile(startup_path_name.c_str()));
// check for parse errors
if (status) { // error
cerr << endl << "**ERROR** reading/parsing " << startup_path_name << " failed." \
<< endl << "**ERROR** Please make sure that the file exists in the local directory and it is set up correctly!" \
<< endl;
assert(false);
}
fWisdom = startupHandler->GetWisdomFile();
fParForPofT.push_back(0.0); // phase
fParForPofT.push_back(startupHandler->GetDeltat());
fParForPofT.push_back(startupHandler->GetDeltaB());
fParForPofB.push_back(startupHandler->GetDeltat());
fParForPofB.push_back(startupHandler->GetDeltaB());
fParForPofB.push_back(0.0); // nu0
fParForPofB.push_back(0.0); // s-
fParForPofB.push_back(0.0); // s+
fPofB = new TPofBCalc(fParForPofB);
fPofT = new TPofTCalc(fPofB, fWisdom, fParForPofT);
// clean up
if (saxParser) {
delete saxParser;
saxParser = 0;
}
if (startupHandler) {
delete startupHandler;
startupHandler = 0;
}
}
//------------------
// TSkewedGss method that calls the procedures to create p(B) and P(t)
// It finally returns P(t) for a given t.
// Parameters: all the parameters for the function to be fitted through TSkewedGss (phase,freq0,sigma-,sigma+)
//------------------
double TSkewedGss::operator()(double t, const vector<double> &par) const {
assert(par.size() == 4);
if(t<0.0)
return 1.0;
// check if the function is called the first time and if yes, read in parameters
if(fFirstCall){
fPar = par;
fFirstCall=false;
}
// check if any parameter has changed
bool par_changed(false);
bool only_phase_changed(false);
for (unsigned int i(0); i<fPar.size(); i++) {
if( fPar[i]-par[i] ) {
fPar[i] = par[i];
par_changed = true;
if (i == 0) {
only_phase_changed = true;
} else {
only_phase_changed = false;
}
}
}
if (par_changed)
fCalcNeeded = true;
// if model parameters have changed, recalculate P(B) and P(t)
if (fCalcNeeded) {
fParForPofT[0] = par[0]; // phase
if(!only_phase_changed) {
// cout << " Parameters have changed, (re-)calculating p(B) and P(t) now..." << endl;
fParForPofB[2] = par[1]; // nu0
fParForPofB[3] = par[2]; // sigma-
fParForPofB[4] = par[3]; // sigma+
fPofB->Calculate("skg", fParForPofB);
fPofT->DoFFT();
}/* else {
cout << "Only the phase parameter has changed, (re-)calculating P(t) now..." << endl;
}*/
fPofT->CalcPol(fParForPofT);
fCalcNeeded = false;
}
return fPofT->Eval(t);
}
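/* Usage sketch (illustrative only): as a fit function the class is evaluated with four
   parameters, cf. the assert and the parameter comment above:

   TSkewedGss skg;                 // reads BMW_startup.xml on construction
   vector<double> par;
   par.push_back(20.0);            // phase [deg]
   par.push_back(2.0);             // nu0 [MHz]
   par.push_back(0.2);             // sigma-
   par.push_back(0.4);             // sigma+
   double pol = skg(1.0, par);     // muon spin polarization at t = 1 us
*/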

File diff suppressed because it is too large