Merge branch 'master' into root6
@@ -5866,9 +5866,10 @@ void PMsrHandler::CheckMaxLikelihood()
 {
   if (!fStatistic.fChisq) {
     for (UInt_t i=0; i<fRuns.size(); i++) {
-      if ((fRuns[i].GetFitType() != MSR_FITTYPE_SINGLE_HISTO) && (fGlobal.GetFitType() != MSR_FITTYPE_SINGLE_HISTO)) {
+      if ((fRuns[i].GetFitType() != MSR_FITTYPE_SINGLE_HISTO) && (fGlobal.GetFitType() != MSR_FITTYPE_SINGLE_HISTO) &&
+          (fRuns[i].GetFitType() != MSR_FITTYPE_MU_MINUS) && (fGlobal.GetFitType() != MSR_FITTYPE_MU_MINUS)) {
         cerr << endl << ">> PMsrHandler::CheckMaxLikelihood: **WARNING**: Maximum Log Likelihood Fit is only implemented";
-        cerr << endl << ">> for Single Histogram Fit. Will fall back to Chi Square Fit.";
+        cerr << endl << ">> for Single Histogram and Mu Minus Fits. Will fall back to Chi Square Fit.";
         cerr << endl << endl;
         fStatistic.fChisq = true;
         break;

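The hunk above widens the guard so that a maximum-log-likelihood request is only rejected (and converted to a chi-square fit) when neither the RUN block nor the GLOBAL block uses a single-histogram or mu-minus fit type. A minimal standalone sketch of that fallback rule, with hypothetical fit-type constants and a plain vector standing in for the msr-file run list:

#include <iostream>
#include <vector>

// Hypothetical stand-ins for the msr-file fit-type tags used in the diff.
enum FitType { FITTYPE_SINGLE_HISTO, FITTYPE_ASYMMETRY, FITTYPE_MU_MINUS };

// Returns true if a max-log-likelihood fit can be kept, false if the caller
// should fall back to chi-square (mirrors the warning branch above).
bool KeepMaxLikelihood(const std::vector<FitType>& runFitTypes, FitType globalFitType)
{
  for (FitType run : runFitTypes) {
    bool singleHisto = (run == FITTYPE_SINGLE_HISTO) || (globalFitType == FITTYPE_SINGLE_HISTO);
    bool muMinus     = (run == FITTYPE_MU_MINUS)     || (globalFitType == FITTYPE_MU_MINUS);
    if (!singleHisto && !muMinus) {
      std::cerr << "**WARNING**: max log likelihood only for single-histogram and mu-minus fits,"
                << " falling back to chi-square\n";
      return false;
    }
  }
  return true;
}

int main()
{
  std::vector<FitType> runs = {FITTYPE_SINGLE_HISTO, FITTYPE_ASYMMETRY};
  std::cout << std::boolalpha << KeepMaxLikelihood(runs, FITTYPE_ASYMMETRY) << std::endl; // prints false
}
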
@@ -62,6 +62,9 @@ PRunAsymmetry::PRunAsymmetry() : PRunBase()
   // the fit range can be changed in the command block, these variables need to be accessible
   fGoodBins[0] = -1;
   fGoodBins[1] = -1;
+
+  fStartTimeBin = -1;
+  fEndTimeBin = -1;
 }
 
 //--------------------------------------------------------------------------

@@ -82,6 +85,9 @@ PRunAsymmetry::PRunAsymmetry(PMsrHandler *msrInfo, PRunDataHandler *rawData, UIn
   fGoodBins[0] = -1;
   fGoodBins[1] = -1;
 
+  fStartTimeBin = -1;
+  fEndTimeBin = -1;
+
   fPacking = fRunInfo->GetPacking();
   if (fPacking == -1) { // i.e. packing is NOT given in the RUN-block, it must be given in the GLOBAL-block
     fPacking = fMsrInfo->GetMsrGlobal()->GetPacking();

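The constructor resolves packing with a simple fallback: take the value from the RUN block and, if it is absent there (encoded as -1), fall back to the GLOBAL block. A small sketch of that resolution rule, using hypothetical plain structs in place of PMsrHandler and the msr-file blocks:

#include <iostream>

// Hypothetical, simplified stand-ins for the msr-file RUN and GLOBAL blocks.
struct RunBlock    { int packing = -1; };  // -1 means "not given in the RUN block"
struct GlobalBlock { int packing = 1;  };

// Resolve packing the same way the constructor above does: RUN block first,
// GLOBAL block as the fallback when the RUN block does not specify it.
int ResolvePacking(const RunBlock& run, const GlobalBlock& global)
{
  int packing = run.packing;
  if (packing == -1)
    packing = global.packing;
  return packing;
}

int main()
{
  RunBlock run;            // no packing in the RUN block
  GlobalBlock global;
  global.packing = 5;      // GLOBAL block says 5
  std::cout << ResolvePacking(run, global) << std::endl; // prints 5
}
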
@@ -189,15 +195,7 @@ Double_t PRunAsymmetry::CalcChiSquare(const std::vector<Double_t>& par)
 
   // calculate chi square
   Double_t time(1.0);
-  Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
-
-  // In order not to have an IF in the next loop, determine the start and end bins for the fit range now
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > N)
-    endTimeBin = N;
+  Int_t i;
 
   // Calculate the theory function once to ensure one function evaluation for the current set of parameters.
   // This is needed for the LF and user functions where some non-thread-safe calculations only need to be calculated once

@@ -206,12 +204,12 @@ Double_t PRunAsymmetry::CalcChiSquare(const std::vector<Double_t>& par)
   asymFcnValue = fTheory->Func(time, par, fFuncValues);
 
 #ifdef HAVE_GOMP
-  Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
+  Int_t chunk = (fEndTimeBin - fStartTimeBin)/omp_get_num_procs();
   if (chunk < 10)
     chunk = 10;
   #pragma omp parallel for default(shared) private(i,time,diff,asymFcnValue,a,b,f) schedule(dynamic,chunk) reduction(+:chisq)
 #endif
-  for (i=startTimeBin; i < endTimeBin; ++i) {
+  for (i=fStartTimeBin; i<fEndTimeBin; ++i) {
     time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
     switch (fAlphaBetaTag) {
       case 1: // alpha == 1, beta == 1

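Every CalcChiSquare variant in this diff parallelizes the same pattern: derive a dynamic-schedule chunk size from the bin count and the processor count (with a floor of 10), then accumulate chi-square with an OpenMP reduction. A self-contained sketch of that pattern over synthetic data; the chunk heuristic and the reduction mirror the hunk above, while the data, the "theory", and the use of the standard _OPENMP macro (instead of HAVE_GOMP) are illustrative assumptions:

// Build with: g++ -fopenmp chisq_sketch.cpp (compiles serially without OpenMP as well).
#include <cmath>
#include <iostream>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif

int main()
{
  // Synthetic "measured" data with unit errors; a real fit would use fData.
  const int nBins = 10000;
  std::vector<double> value(nBins), error(nBins, 1.0);
  for (int i = 0; i < nBins; ++i)
    value[i] = std::exp(-i * 1.0e-3);

  const int startTimeBin = 100, endTimeBin = 9000; // would be fStartTimeBin/fEndTimeBin
  double chisq = 0.0;
  double diff;
  int i;

#ifdef _OPENMP
  int chunk = (endTimeBin - startTimeBin) / omp_get_num_procs();
  if (chunk < 10)
    chunk = 10;
  #pragma omp parallel for default(shared) private(i,diff) schedule(dynamic,chunk) reduction(+:chisq)
#endif
  for (i = startTimeBin; i < endTimeBin; ++i) {
    double theo = std::exp(-i * 1.0e-3); // stand-in for fTheory->Func(time, par, fFuncValues)
    diff = value[i] - theo;
    chisq += diff * diff / (error[i] * error[i]);
  }

  std::cout << "chisq = " << chisq << std::endl; // ~0 for this perfect "theory"
}
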
@@ -389,15 +387,15 @@ void PRunAsymmetry::SetFitRangeBin(const TString fitRange)
 void PRunAsymmetry::CalcNoOfFitBins()
 {
   // In order not to have to loop over all bins and to stay consistent with the chisq method, calculate the start and end bins explicitly
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > static_cast<Int_t>(fData.GetValue()->size()))
-    endTimeBin = fData.GetValue()->size();
+  fStartTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
+  if (fStartTimeBin < 0)
+    fStartTimeBin = 0;
+  fEndTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
+  if (fEndTimeBin > static_cast<Int_t>(fData.GetValue()->size()))
+    fEndTimeBin = fData.GetValue()->size();
 
-  if (endTimeBin > startTimeBin)
-    fNoOfFitBins = endTimeBin - startTimeBin;
+  if (fEndTimeBin > fStartTimeBin)
+    fNoOfFitBins = fEndTimeBin - fStartTimeBin;
   else
     fNoOfFitBins = 0;
 }

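All three run classes now compute the fit window once and cache it in fStartTimeBin/fEndTimeBin: the first bin is ceil((fitStart - dataStart)/step) clamped to 0, the last bin is floor((fitEnd - dataStart)/step) + 1 clamped to the number of data points, and the number of fitted bins is their (non-negative) difference. A standalone sketch of that mapping with made-up numbers:

#include <cmath>
#include <iostream>

// Mirrors the cached-bin computation in CalcNoOfFitBins(): time window -> [startBin, endBin).
void CalcFitBins(double fitStart, double fitEnd, double dataStart, double step,
                 int nData, int& startBin, int& endBin, int& noOfFitBins)
{
  startBin = static_cast<int>(std::ceil((fitStart - dataStart) / step));
  if (startBin < 0)
    startBin = 0;
  endBin = static_cast<int>(std::floor((fitEnd - dataStart) / step)) + 1;
  if (endBin > nData)
    endBin = nData;
  noOfFitBins = (endBin > startBin) ? (endBin - startBin) : 0;
}

int main()
{
  int start, end, n;
  // Example: data starts at 0.0 us, 0.01 us per bin, 1000 bins, fit range 0.1-5.0 us.
  CalcFitBins(0.1, 5.0, 0.0, 0.01, 1000, start, end, n);
  std::cout << start << " " << end << " " << n << std::endl; // 10 501 491
}
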
@@ -52,6 +52,9 @@ PRunMuMinus::PRunMuMinus() : PRunBase()
   fGoodBins[0] = -1;
   fGoodBins[1] = -1;
 
+  fStartTimeBin = -1;
+  fEndTimeBin = -1;
+
   fHandleTag = kEmpty;
 }
 
@@ -87,6 +90,9 @@ PRunMuMinus::PRunMuMinus(PMsrHandler *msrInfo, PRunDataHandler *rawData, UInt_t
   fGoodBins[0] = -1;
   fGoodBins[1] = -1;
 
+  fStartTimeBin = -1;
+  fEndTimeBin = -1;
+
   if (!PrepareData()) {
     cerr << endl << ">> PRunMuMinus::PRunMuMinus: **SEVERE ERROR**: Couldn't prepare data for fitting!";
     cerr << endl << ">> This is very bad :-(, will quit ...";

@@ -130,15 +136,7 @@ Double_t PRunMuMinus::CalcChiSquare(const std::vector<Double_t>& par)
 
   // calculate chi square
   Double_t time(1.0);
-  Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
-
-  // In order not to have an IF in the next loop, determine the start and end bins for the fit range now
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > N)
-    endTimeBin = N;
+  Int_t i;
 
   // Calculate the theory function once to ensure one function evaluation for the current set of parameters.
   // This is needed for the LF and user functions where some non-thread-safe calculations only need to be calculated once

@@ -147,12 +145,12 @@ Double_t PRunMuMinus::CalcChiSquare(const std::vector<Double_t>& par)
   time = fTheory->Func(time, par, fFuncValues);
 
 #ifdef HAVE_GOMP
-  Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
+  Int_t chunk = (fEndTimeBin - fStartTimeBin)/omp_get_num_procs();
   if (chunk < 10)
     chunk = 10;
   #pragma omp parallel for default(shared) private(i,time,diff) schedule(dynamic,chunk) reduction(+:chisq)
 #endif
-  for (i=startTimeBin; i < endTimeBin; ++i) {
+  for (i=fStartTimeBin; i < fEndTimeBin; ++i) {
     time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
     diff = fData.GetValue()->at(i) - fTheory->Func(time, par, fFuncValues);
     chisq += diff*diff / (fData.GetError()->at(i)*fData.GetError()->at(i));

@@ -186,15 +184,7 @@ Double_t PRunMuMinus::CalcChiSquareExpected(const std::vector<Double_t>& par)
 
   // calculate chi square
   Double_t time(1.0);
-  Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
-
-  // In order not to have an IF in the next loop, determine the start and end bins for the fit range now
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > N)
-    endTimeBin = N;
+  Int_t i;
 
   // Calculate the theory function once to ensure one function evaluation for the current set of parameters.
   // This is needed for the LF and user functions where some non-thread-safe calculations only need to be calculated once

@@ -203,12 +193,12 @@ Double_t PRunMuMinus::CalcChiSquareExpected(const std::vector<Double_t>& par)
   time = fTheory->Func(time, par, fFuncValues);
 
 #ifdef HAVE_GOMP
-  Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
+  Int_t chunk = (fEndTimeBin - fStartTimeBin)/omp_get_num_procs();
   if (chunk < 10)
     chunk = 10;
   #pragma omp parallel for default(shared) private(i,time,diff) schedule(dynamic,chunk) reduction(+:chisq)
 #endif
-  for (i=startTimeBin; i < endTimeBin; ++i) {
+  for (i=fStartTimeBin; i < fEndTimeBin; ++i) {
     time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
     theo = fTheory->Func(time, par, fFuncValues);
     diff = fData.GetValue()->at(i) - theo;

@@ -243,15 +233,7 @@ Double_t PRunMuMinus::CalcMaxLikelihood(const std::vector<Double_t>& par)
   Double_t theo;
   Double_t data;
   Double_t time(1.0);
-  Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
-
-  // In order not to have an IF in the next loop, determine the start and end bins for the fit range now
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > N)
-    endTimeBin = N;
+  Int_t i;
 
   // Calculate the theory function once to ensure one function evaluation for the current set of parameters.
   // This is needed for the LF and user functions where some non-thread-safe calculations only need to be calculated once

@@ -260,12 +242,12 @@ Double_t PRunMuMinus::CalcMaxLikelihood(const std::vector<Double_t>& par)
   time = fTheory->Func(time, par, fFuncValues);
 
 #ifdef HAVE_GOMP
-  Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
+  Int_t chunk = (fEndTimeBin - fStartTimeBin)/omp_get_num_procs();
   if (chunk < 10)
     chunk = 10;
   #pragma omp parallel for default(shared) private(i,time,theo,data) schedule(dynamic,chunk) reduction(-:mllh)
 #endif
-  for (i=startTimeBin; i < endTimeBin; ++i) {
+  for (i=fStartTimeBin; i < fEndTimeBin; ++i) {
     time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
     // calculate theory for the given parameter set
     theo = fTheory->Func(time, par, fFuncValues);

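The max-likelihood loops accumulate a quantity mllh with reduction(-:mllh), i.e. each bin subtracts its contribution; the per-bin expression itself lies outside this hunk. As an illustration only, the sketch below sums the standard Poisson log-likelihood-ratio term 2[(theo - data) + data*ln(data/theo)] for counts against a model; this is a common choice for histogram fits and is not necessarily the exact expression used in PRunMuMinus:

#include <cmath>
#include <iostream>
#include <vector>

// Poisson log-likelihood-ratio ("chi-square like") sum for counting data.
// For data == 0 the data*ln(data/theo) term is dropped (its limit is 0).
double PoissonMllh(const std::vector<double>& data, const std::vector<double>& theo)
{
  double mllh = 0.0;
  for (size_t i = 0; i < data.size(); ++i) {
    double term = theo[i] - data[i];
    if (data[i] > 0.0 && theo[i] > 0.0)
      term += data[i] * std::log(data[i] / theo[i]);
    mllh += 2.0 * term;
  }
  return mllh;
}

int main()
{
  std::vector<double> data = {105, 98, 88, 92};
  std::vector<double> theo = {100, 95, 90, 85};
  std::cout << PoissonMllh(data, theo) << std::endl; // small positive number for a decent model
}
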
@@ -401,15 +383,15 @@ void PRunMuMinus::SetFitRangeBin(const TString fitRange)
 void PRunMuMinus::CalcNoOfFitBins()
 {
   // In order not to have to loop over all bins and to stay consistent with the chisq method, calculate the start and end bins explicitly
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > static_cast<Int_t>(fData.GetValue()->size()))
-    endTimeBin = fData.GetValue()->size();
+  fStartTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
+  if (fStartTimeBin < 0)
+    fStartTimeBin = 0;
+  fEndTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
+  if (fEndTimeBin > static_cast<Int_t>(fData.GetValue()->size()))
+    fEndTimeBin = fData.GetValue()->size();
 
-  if (endTimeBin > startTimeBin)
-    fNoOfFitBins = endTimeBin - startTimeBin;
+  if (fEndTimeBin > fStartTimeBin)
+    fNoOfFitBins = fEndTimeBin - fStartTimeBin;
   else
     fNoOfFitBins = 0;
 }
 
@@ -64,6 +64,9 @@ PRunSingleHisto::PRunSingleHisto() : PRunBase()
   // the fit range can be changed in the command block, these variables need to be accessible
   fGoodBins[0] = -1;
   fGoodBins[1] = -1;
+
+  fStartTimeBin = -1;
+  fEndTimeBin = -1;
 }
 
 //--------------------------------------------------------------------------

@@ -81,6 +84,7 @@ PRunSingleHisto::PRunSingleHisto(PMsrHandler *msrInfo, PRunDataHandler *rawData,
 {
   fScaleN0AndBkg = IsScaleN0AndBkg();
   fNoOfFitBins = 0;
   fBackground = 0;
 
   fPacking = fRunInfo->GetPacking();
   if (fPacking == -1) { // i.e. packing is NOT given in the RUN-block, it must be given in the GLOBAL-block

@@ -99,6 +103,9 @@ PRunSingleHisto::PRunSingleHisto(PMsrHandler *msrInfo, PRunDataHandler *rawData,
   fGoodBins[0] = -1;
   fGoodBins[1] = -1;
 
+  fStartTimeBin = -1;
+  fEndTimeBin = -1;
+
   if (!PrepareData()) {
     cerr << endl << ">> PRunSingleHisto::PRunSingleHisto: **SEVERE ERROR**: Couldn't prepare data for fitting!";
     cerr << endl << ">> This is very bad :-(, will quit ...";

@@ -173,15 +180,7 @@ Double_t PRunSingleHisto::CalcChiSquare(const std::vector<Double_t>& par)
 
   // calculate chi square
   Double_t time(1.0);
-  Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
-
-  // In order not to have an IF in the next loop, determine the start and end bins for the fit range now
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > N)
-    endTimeBin = N;
+  Int_t i;
 
   // Calculate the theory function once to ensure one function evaluation for the current set of parameters.
   // This is needed for the LF and user functions where some non-thread-safe calculations only need to be calculated once

@@ -190,12 +189,12 @@ Double_t PRunSingleHisto::CalcChiSquare(const std::vector<Double_t>& par)
   time = fTheory->Func(time, par, fFuncValues);
 
 #ifdef HAVE_GOMP
-  Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
+  Int_t chunk = (fEndTimeBin - fStartTimeBin)/omp_get_num_procs();
   if (chunk < 10)
     chunk = 10;
   #pragma omp parallel for default(shared) private(i,time,diff) schedule(dynamic,chunk) reduction(+:chisq)
 #endif
-  for (i=startTimeBin; i < endTimeBin; ++i) {
+  for (i=fStartTimeBin; i<fEndTimeBin; ++i) {
     time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
     diff = fData.GetValue()->at(i) -
            (N0*TMath::Exp(-time/tau)*(1.0+fTheory->Func(time, par, fFuncValues))+bkg);

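The single-histogram chi-square compares each bin to the model N(t) = N0*exp(-t/tau)*(1 + A(t)) + bkg, where A(t) is the user-defined theory (asymmetry) function. A minimal sketch of that model and one chi-square term, with a hypothetical exponentially damped asymmetry standing in for fTheory->Func and made-up parameter values:

#include <cmath>
#include <iostream>

// Hypothetical asymmetry function A(t), standing in for fTheory->Func(...).
double Asymmetry(double t, double asym, double lambda)
{
  return asym * std::exp(-lambda * t);
}

// Single-histogram model used in the chi-square above: N0*exp(-t/tau)*(1+A(t)) + bkg.
double SingleHistoModel(double t, double N0, double tau, double bkg, double asym, double lambda)
{
  return N0 * std::exp(-t / tau) * (1.0 + Asymmetry(t, asym, lambda)) + bkg;
}

int main()
{
  // Made-up values: t and tau in microseconds.
  double t = 1.0, N0 = 1000.0, tau = 2.197, bkg = 5.0, asym = 0.25, lambda = 0.1;
  double theo = SingleHistoModel(t, N0, tau, bkg, asym, lambda);

  double value = 790.0, error = std::sqrt(value);   // one measured bin and its Poisson error
  double diff = value - theo;
  std::cout << "theo = " << theo << ", chisq term = " << diff * diff / (error * error) << std::endl;
}
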
@@ -266,15 +265,7 @@ Double_t PRunSingleHisto::CalcChiSquareExpected(const std::vector<Double_t>& par
 
   // calculate chi square
   Double_t time(1.0);
-  Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
-
-  // In order not to have an IF in the next loop, determine the start and end bins for the fit range now
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > N)
-    endTimeBin = N;
+  Int_t i;
 
   // Calculate the theory function once to ensure one function evaluation for the current set of parameters.
   // This is needed for the LF and user functions where some non-thread-safe calculations only need to be calculated once

@@ -283,12 +274,12 @@ Double_t PRunSingleHisto::CalcChiSquareExpected(const std::vector<Double_t>& par
   time = fTheory->Func(time, par, fFuncValues);
 
 #ifdef HAVE_GOMP
-  Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
+  Int_t chunk = (fEndTimeBin - fStartTimeBin)/omp_get_num_procs();
   if (chunk < 10)
     chunk = 10;
   #pragma omp parallel for default(shared) private(i,time,diff) schedule(dynamic,chunk) reduction(+:chisq)
 #endif
-  for (i=startTimeBin; i < endTimeBin; ++i) {
+  for (i=fStartTimeBin; i < fEndTimeBin; ++i) {
     time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
     theo = N0*TMath::Exp(-time/tau)*(1.0+fTheory->Func(time, par, fFuncValues))+bkg;
     diff = fData.GetValue()->at(i) - theo;

@@ -359,7 +350,7 @@ Double_t PRunSingleHisto::CalcMaxLikelihood(const std::vector<Double_t>& par)
   Double_t theo;
   Double_t data;
   Double_t time(1.0);
-  Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
+  Int_t i;
 
   // norm is needed since there is no simple scaling like in chisq case to get the correct Max.Log.Likelihood value when normalizing N(t) to 1/ns
   Double_t normalizer = 1.0;

@@ -367,14 +358,6 @@ Double_t PRunSingleHisto::CalcMaxLikelihood(const std::vector<Double_t>& par)
   if (fScaleN0AndBkg)
     normalizer = fPacking * (fTimeResolution * 1.0e3);
 
-  // In order not to have an IF in the next loop, determine the start and end bins for the fit range now
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > N)
-    endTimeBin = N;
-
   // Calculate the theory function once to ensure one function evaluation for the current set of parameters.
   // This is needed for the LF and user functions where some non-thread-safe calculations only need to be calculated once
   // for a given set of parameters---which should be done outside of the parallelized loop.

@@ -382,12 +365,12 @@ Double_t PRunSingleHisto::CalcMaxLikelihood(const std::vector<Double_t>& par)
   time = fTheory->Func(time, par, fFuncValues);
 
 #ifdef HAVE_GOMP
-  Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
+  Int_t chunk = (fEndTimeBin - fStartTimeBin)/omp_get_num_procs();
   if (chunk < 10)
     chunk = 10;
   #pragma omp parallel for default(shared) private(i,time,theo,data) schedule(dynamic,chunk) reduction(-:mllh)
 #endif
-  for (i=startTimeBin; i<endTimeBin; ++i) {
+  for (i=fStartTimeBin; i<fEndTimeBin; ++i) {
     time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
     // calculate theory for the given parameter set
     theo = N0*TMath::Exp(-time/tau)*(1.0+fTheory->Func(time, par, fFuncValues))+bkg;

@@ -588,15 +571,15 @@ void PRunSingleHisto::SetFitRangeBin(const TString fitRange)
 void PRunSingleHisto::CalcNoOfFitBins()
 {
   // In order not to have to loop over all bins and to stay consistent with the chisq method, calculate the start and end bins explicitly
-  Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
-  if (startTimeBin < 0)
-    startTimeBin = 0;
-  Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
-  if (endTimeBin > static_cast<Int_t>(fData.GetValue()->size()))
-    endTimeBin = fData.GetValue()->size();
+  fStartTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
+  if (fStartTimeBin < 0)
+    fStartTimeBin = 0;
+  fEndTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
+  if (fEndTimeBin > static_cast<Int_t>(fData.GetValue()->size()))
+    fEndTimeBin = fData.GetValue()->size();
 
-  if (endTimeBin > startTimeBin)
-    fNoOfFitBins = endTimeBin - startTimeBin;
+  if (fEndTimeBin > fStartTimeBin)
+    fNoOfFitBins = fEndTimeBin - fStartTimeBin;
   else
     fNoOfFitBins = 0;
 }

@@ -1555,7 +1538,7 @@ void PRunSingleHisto::EstimateN0()
   if (paramNo > 10000) // i.e. fun or map
     return;
 
-  // still missing: set this value in the parameters
+  // get the parameters
   PMsrParamList *param = fMsrInfo->GetMsrParamList();
   assert(param);
 
@@ -1564,6 +1547,11 @@ void PRunSingleHisto::EstimateN0()
     return;
   }
 
+  // check if N0 is fixed. If this is the case, do NOT estimate N0
+  if (param->at(paramNo-1).fStep == 0.0) // N0 parameter fixed
+    return;
+
 
   // check that 'backgr.fit' in the msr-file run block is indeed a parameter number.
   // in case it is a function, nothing will be done.
   Int_t paramNoBkg = fRunInfo->GetBkgFitParamNo();

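EstimateN0 now bails out when the N0 parameter is fixed (fit step 0.0), since a fixed parameter must not be overwritten by an automatic estimate. A small sketch of that guard, with a hypothetical parameter record in place of PMsrParamList:

#include <iostream>
#include <vector>

// Hypothetical, simplified parameter entry (the msr file stores a value and a fit step per parameter).
struct Param {
  double value;
  double step;   // 0.0 means the parameter is fixed
};

// Only overwrite the parameter with an estimate when it is actually being fitted.
void MaybeEstimate(std::vector<Param>& params, int paramNo, double estimate)
{
  if (params.at(paramNo - 1).step == 0.0)  // parameter fixed -> keep the user's value
    return;
  params.at(paramNo - 1).value = estimate;
}

int main()
{
  std::vector<Param> params = {{1000.0, 0.0}, {1000.0, 10.0}};
  MaybeEstimate(params, 1, 1234.0); // fixed, stays 1000
  MaybeEstimate(params, 2, 1234.0); // free, becomes 1234
  std::cout << params[0].value << " " << params[1].value << std::endl; // 1000 1234
}
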
@@ -507,6 +507,10 @@ Double_t PTheory::Func(register Double_t t, const PDoubleVector& paramValues, co
       return Polynom(t, paramValues, funcValues) * fMul->Func(t, paramValues, funcValues) +
              fAdd->Func(t, paramValues, funcValues);
       break;
+    case THEORY_MU_MINUS_EXP:
+      return MuMinusExpTF(t, paramValues, funcValues) * fMul->Func(t, paramValues, funcValues) +
+             fAdd->Func(t, paramValues, funcValues);
+      break;
     case THEORY_USER_FCN:
       return UserFcn(t, paramValues, funcValues) * fMul->Func(t, paramValues, funcValues) +
              fAdd->Func(t, paramValues, funcValues);

@@ -599,6 +603,9 @@ Double_t PTheory::Func(register Double_t t, const PDoubleVector& paramValues, co
     case THEORY_DYNAMIC_TF_NK:
       return DynamicNKTF (t, paramValues, funcValues) * fMul->Func(t, paramValues, funcValues);
       break;
+    case THEORY_MU_MINUS_EXP:
+      return MuMinusExpTF(t, paramValues, funcValues) * fMul->Func(t, paramValues, funcValues);
+      break;
     case THEORY_POLYNOM:
       return Polynom(t, paramValues, funcValues) * fMul->Func(t, paramValues, funcValues);
       break;

@@ -695,6 +702,9 @@ Double_t PTheory::Func(register Double_t t, const PDoubleVector& paramValues, co
     case THEORY_DYNAMIC_TF_NK:
       return DynamicNKTF (t, paramValues, funcValues) + fAdd->Func(t, paramValues, funcValues);
       break;
+    case THEORY_MU_MINUS_EXP:
+      return MuMinusExpTF(t, paramValues, funcValues) + fAdd->Func(t, paramValues, funcValues);
+      break;
     case THEORY_POLYNOM:
       return Polynom(t, paramValues, funcValues) + fAdd->Func(t, paramValues, funcValues);
       break;

@@ -789,6 +799,9 @@ Double_t PTheory::Func(register Double_t t, const PDoubleVector& paramValues, co
     case THEORY_DYNAMIC_TF_NK:
       return DynamicNKTF(t, paramValues, funcValues);
       break;
+    case THEORY_MU_MINUS_EXP:
+      return MuMinusExpTF(t, paramValues, funcValues);
+      break;
     case THEORY_POLYNOM:
       return Polynom(t, paramValues, funcValues);
       break;

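The four hunks above add THEORY_MU_MINUS_EXP to each branch of PTheory::Func, which dispatches on the node's theory type and then composes the result with an optional multiplied (fMul) and/or added (fAdd) sub-tree. A reduced sketch of that composition pattern, using a hypothetical two-type node and toy primitives instead of the full theory table:

#include <cmath>
#include <iostream>

// Reduced model of a theory-tree node: each node evaluates its own primitive
// and combines it with an optional multiplied and/or added child, as in PTheory::Func.
struct TheoryNode {
  enum Type { kExp, kMuMinusExp } type;
  double p0, p1;           // primitive parameters, kept deliberately simple
  const TheoryNode* mul;   // corresponds to fMul (may be null)
  const TheoryNode* add;   // corresponds to fAdd (may be null)

  double Primitive(double t) const {
    switch (type) {
      case kExp:        return std::exp(-p0 * t);
      case kMuMinusExp: return p0 * std::exp(-t / p1);  // toy stand-in for MuMinusExpTF
    }
    return 0.0;
  }

  double Func(double t) const {
    double v = Primitive(t);
    if (mul && add) return v * mul->Func(t) + add->Func(t);
    if (mul)        return v * mul->Func(t);
    if (add)        return v + add->Func(t);
    return v;
  }
};

int main()
{
  TheoryNode damped{TheoryNode::kExp, 0.5, 0.0, nullptr, nullptr};
  TheoryNode root{TheoryNode::kMuMinusExp, 1000.0, 2.197, &damped, nullptr};
  std::cout << root.Func(1.0) << std::endl; // 1000*exp(-1/2.197)*exp(-0.5)
}
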
@@ -2942,6 +2955,46 @@ Double_t PTheory::GetDynKTLFValue(const Double_t t) const
   return fDynLFFuncValue[idx]+df;
 }
 
+//--------------------------------------------------------------------------
+/**
+ * <p> theory function: MuMinusExpTF
+ *
+ * \f[ = N_0 \exp(-t/\tau) [1 + A \exp(-\lambda t) \cos(2\pi\nu t + \phi)] \f]
+ *
+ * <b>meaning of paramValues:</b> \f$N_0\f$, \f$\tau\f$, \f$A\f$, \f$\lambda\f$, \f$\phi\f$, \f$\nu\f$, and optionally \f$t_{\rm shift}\f$
+ *
+ * <b>return:</b> function value
+ *
+ * \param t time in \f$(\mu\mathrm{s})\f$, or x-axis value for non-muSR fit
+ * \param paramValues parameter values
+ * \param funcValues vector with the functions (i.e. functions of the parameters)
+ */
+Double_t PTheory::MuMinusExpTF(register Double_t t, const PDoubleVector& paramValues, const PDoubleVector& funcValues) const
+{
+  // expected parameters: N0 tau A lambda phase frequency [tshift]
+
+  Double_t val[7];
+
+  assert(fParamNo.size() <= 7);
+
+  // check if FUNCTIONS are used
+  for (UInt_t i=0; i<fParamNo.size(); i++) {
+    if (fParamNo[i] < MSR_PARAM_FUN_OFFSET) { // parameter or resolved map
+      val[i] = paramValues[fParamNo[i]];
+    } else { // function
+      val[i] = funcValues[fParamNo[i]-MSR_PARAM_FUN_OFFSET];
+    }
+  }
+
+  Double_t tt;
+  if (fParamNo.size() == 6) // no tshift
+    tt = t;
+  else // tshift present
+    tt = t-val[6];
+
+  return val[0]*exp(-tt/val[1])*(1.0+val[2]*exp(-val[3]*tt)*cos(TWO_PI*val[5]*tt+DEG_TO_RAD*val[4]));
+}
+
 //--------------------------------------------------------------------------
 // END
 //--------------------------------------------------------------------------

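For reference, the new theory function evaluates N0*exp(-t/tau)*(1 + A*exp(-lambda*t)*cos(2*pi*nu*t + phase)), with the phase given in degrees and an optional time shift applied before evaluation. Below is a standalone re-implementation with plain doubles (the constants TWO_PI and DEG_TO_RAD written out, parameter values made up) showing how the parameters map onto the formula:

#include <cmath>
#include <iostream>

// Standalone version of the MuMinusExpTF formula above; parameters follow the
// code comment's order: N0, tau, A, lambda, phase (deg), frequency, optional tshift.
// With t and tau in microseconds, nu is the precession frequency in MHz.
double MuMinusExpTF(double t, double N0, double tau, double A, double lambda,
                    double phaseDeg, double nu, double tshift = 0.0)
{
  const double kPi = 3.14159265358979323846;
  const double TWO_PI = 2.0 * kPi;
  const double DEG_TO_RAD = kPi / 180.0;
  double tt = t - tshift;
  return N0 * std::exp(-tt / tau) *
         (1.0 + A * std::exp(-lambda * tt) * std::cos(TWO_PI * nu * tt + DEG_TO_RAD * phaseDeg));
}

int main()
{
  // Made-up values: 0.88 us effective lifetime (bound negative muons decay faster than the
  // free-muon 2.197 us), 0.2 asymmetry, 0.1 us^-1 damping, 1 MHz precession, 10 deg phase.
  double t = 0.5; // microseconds
  std::cout << MuMinusExpTF(t, 1000.0, 0.88, 0.2, 0.1, 10.0, 1.0) << std::endl;
}
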