added negative muon fitting support. Fixed at the same time bugs related to logx/logy plotting. Allow now comments after functions in the FUNCTION block.

This commit is contained in:
suter_a 2012-04-24 10:52:20 +00:00
parent e9f6fcab80
commit 99c24a146d
16 changed files with 817 additions and 173 deletions

View File

@ -4,6 +4,12 @@
# $Id$
#---------------------------------------------------------------------
changes since 0.11.0
===================================
NEW 2012-04-24 added a first version for negative muon fitting. At the same
time substantial bug fixing has been carried out (mainly the
logx/logy handling).
changes since 0.10.0
===================================
NEW 2012-04-15 added timeout facility for musrfit, musrview, musrt0. Adopted musredit/musrgui accordingly.

View File

@ -1,7 +1,7 @@
AC_REVISION([$Id$])
AC_PREREQ(2.59)
AC_INIT(musrfit, 0.10.0, andreas.suter@psi.ch)
AC_INIT(musrfit, 0.11.0, andreas.suter@psi.ch)
AC_CONFIG_AUX_DIR(admin)
AC_CANONICAL_HOST
#AC_MSG_RESULT([${host} ${host_cpu} ${host_vendor} ${host_os}])
@ -35,7 +35,7 @@ dnl -----------------------------------------------
#release versioning
MUSR_MAJOR_VERSION=0
MUSR_MINOR_VERSION=10
MUSR_MINOR_VERSION=11
MUSR_MICRO_VERSION=0
#release versioning

View File

@ -1,30 +0,0 @@
*************************************************************
Build instructions for the shared library libPMusr.so
Andreas Suter, 2008/01/08
$Id$
*************************************************************
At the moment a lot of the build stuff has to be done manually.
I will eventually migrate to cmake and than it will be much more
automatically.
Required packages:
* ROOT >= 5.16 : needed for read/write some data files, plotting, ...
ROOT needs to be compiled with Minuit2 enabled
(configure ... --enable-minuit2 ...)
* Spirit >= 1.8.5 : needed for parsing (this is part of the boost lib, and
hence installed on most linux machines). The boost libs
are platform independent and hence it should be easy in
the future to migrate also to Windows and Mac OS
Build instructions:
make -f Makefile.PMusr
than as root
make -f Makefile.PMusr install

View File

@ -57,6 +57,7 @@ PFunctionHandler::~PFunctionHandler()
{
fLines.clear();
fFuncs.clear();
fFuncComment.clear();
}
//-------------------------------------------------------------
@ -74,8 +75,20 @@ Bool_t PFunctionHandler::DoParse()
// feed the function block into the parser. Start with i=1, since i=0 is FUNCTIONS
for (UInt_t i=1; i<fLines.size(); i++) {
// function line to upper case
// function line to upper case after cutting out prepended comment
line = fLines[i].fLine;
Ssiz_t pos = line.First('#'); // find prepended comment
TString Comment("");
if (pos != kNPOS) { // comment present
for (Int_t i=pos; i<line.Length(); i++) {
Comment += line[i];
}
}
fFuncComment.push_back(Comment);
if (pos != kNPOS) { // comment present, hence remove it from the string to be parsed
line.Remove(pos);
line.Remove(TString::kTrailing, ' ');
}
line.ToUpper();
// do parsing
@ -205,10 +218,17 @@ Int_t PFunctionHandler::GetFuncIndex(Int_t funcNo)
*
* \param idx index of the function
*/
TString* PFunctionHandler::GetFuncString(UInt_t idx)
TString PFunctionHandler::GetFuncString(UInt_t idx)
{
if (idx > fFuncs.size())
return 0;
TString funStr("");
return fFuncs[idx].GetFuncString();
if ((idx > fFuncs.size()) || (idx > fFuncComment.size()))
return funStr;
if (fFuncComment[idx].Length() > 0)
funStr = *fFuncs[idx].GetFuncString() + " " + fFuncComment[idx];
else
funStr = *fFuncs[idx].GetFuncString();
return funStr;
}

View File

@ -581,7 +581,7 @@ Int_t PMsrHandler::WriteMsrLogFile(const Bool_t messages)
sstr.Remove(TString::kLeading, ' ');
if (str.BeginsWith("fun")) {
if (FilterNumber(sstr, "fun", 0, number)) {
sstr = *fFuncHandler->GetFuncString(number-1);
sstr = fFuncHandler->GetFuncString(number-1);
sstr.ToLower();
fout << sstr.Data() << endl;
}
@ -3361,10 +3361,8 @@ Bool_t PMsrHandler::HandlePlotEntry(PMsrLines &lines)
PMsrLines::iterator iter1;
PMsrLines::iterator iter2;
TObjArray *tokens = 0;
TObjArray *tokens2 = 0;
TObjString *ostr = 0;
TString str;
TString str2;
if (lines.empty()) {
cerr << endl << "**WARNING**: There is no PLOT block! Do you really want this?";
@ -3433,6 +3431,7 @@ Bool_t PMsrHandler::HandlePlotEntry(PMsrLines &lines)
case MSR_PLOT_SINGLE_HISTO: // like: runs 1 5 13
case MSR_PLOT_ASYM:
case MSR_PLOT_NON_MUSR:
case MSR_PLOT_MU_MINUS:
tokens = iter1->fLine.Tokenize(" \t");
if (!tokens) {
cerr << endl << ">> PMsrHandler::HandlePlotEntry: **SEVERE ERROR** Couldn't tokenize PLOT in line " << iter1->fLineNo;
@ -3459,46 +3458,6 @@ Bool_t PMsrHandler::HandlePlotEntry(PMsrLines &lines)
tokens = 0;
}
break;
case MSR_PLOT_MU_MINUS: // like: runs 1,1 1,2
tokens = iter1->fLine.Tokenize(" \t");
if (!tokens) {
cerr << endl << ">> PMsrHandler::HandlePlotEntry: **SEVERE ERROR** Couldn't tokenize PLOT in line " << iter1->fLineNo;
cerr << endl << endl;
return false;
}
if (tokens->GetEntries() < 2) { // runs missing
error = true;
} else {
for (Int_t i=1; i<tokens->GetEntries(); i++) {
ostr = dynamic_cast<TObjString*>(tokens->At(i)); // something like 1,2
str = ostr->GetString();
tokens2 = str.Tokenize(",");
if (!tokens2) {
error = true;
} else {
ostr = dynamic_cast<TObjString*>(tokens2->At(0)); // real part
str = ostr->GetString();
ostr = dynamic_cast<TObjString*>(tokens2->At(1)); // imag part
str2 = ostr->GetString();
if (str.IsDigit() && str2.IsDigit()) {
run = TComplex(str.Atoi(),str2.Atoi());
param.fRuns.push_back(run);
} else {
error = true;
}
}
if (tokens2) {
delete tokens2;
tokens2 = 0;
}
}
}
// clean up
if (tokens) {
delete tokens;
tokens = 0;
}
break;
default:
error = true;
break;

View File

@ -3059,11 +3059,6 @@ void PMusrCanvas::PlotData(Bool_t unzoom)
{
fDataTheoryPad->cd();
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogX)
fDataTheoryPad->SetLogx(1);
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY)
fDataTheoryPad->SetLogy(1);
if (!fBatchMode) {
// uncheck fourier menu entries
fPopupFourier->UnCheckEntries();
@ -3109,8 +3104,14 @@ void PMusrCanvas::PlotData(Bool_t unzoom)
dataYmax = GetMaximum(fData[i].data, dataXmin, dataXmax);
}
Double_t dd = 0.05*fabs(dataYmax-dataYmin);
dataYmin -= dd;
dataYmax += dd;
if (!fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY) {
dataYmin -= dd;
dataYmax += dd;
} else {
if (dataYmin < 0)
dataYmin = 0.1;
dataYmax += dd;
}
}
} else { // set the x-/y-range to the previous fHistoFrame range
dataXmin = xmin;
@ -3128,8 +3129,14 @@ void PMusrCanvas::PlotData(Bool_t unzoom)
dataYmax = GetMaximum(fData[i].data, dataXmin, dataXmax);
}
Double_t dd = 0.05*fabs(dataYmax-dataYmin);
dataYmin -= dd;
dataYmax += dd;
if (!fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY) {
dataYmin -= dd;
dataYmax += dd;
} else {
if (dataYmin < 0)
dataYmin = 0.1;
dataYmax += dd;
}
}
}
@ -3153,6 +3160,11 @@ void PMusrCanvas::PlotData(Bool_t unzoom)
fData[i].theory->GetYaxis()->SetRangeUser(dataYmin, dataYmax);
}
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogX)
fDataTheoryPad->SetLogx(1);
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY)
fDataTheoryPad->SetLogy(1);
// set x-axis label
fHistoFrame->GetXaxis()->SetTitle("time (#mus)");
// set y-axis label
@ -3173,7 +3185,7 @@ void PMusrCanvas::PlotData(Bool_t unzoom)
yAxisTitle = "asymmetry";
break;
case MSR_PLOT_MU_MINUS:
yAxisTitle = "??";
yAxisTitle = "N(t) per bin";
break;
default:
yAxisTitle = "??";
@ -3244,8 +3256,14 @@ void PMusrCanvas::PlotData(Bool_t unzoom)
dataYmax = GetMaximum(fNonMusrData[i].data, dataXmin, dataXmax);
}
Double_t dd = 0.05*fabs(dataYmax-dataYmin);
dataYmin -= dd;
dataYmax += dd;
if (!fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY) {
dataYmin -= dd;
dataYmax += dd;
} else {
if (dataYmin < 0)
dataYmin = 0.1;
dataYmax += dd;
}
}
} else { // set the x-/y-range to the previous fHistoFrame range
dataXmin = xmin;
@ -3263,8 +3281,14 @@ void PMusrCanvas::PlotData(Bool_t unzoom)
dataYmax = GetMaximum(fNonMusrData[i].data, dataXmin, dataXmax);
}
Double_t dd = 0.05*fabs(dataYmax-dataYmin);
dataYmin -= dd;
dataYmax += dd;
if (!fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY) {
dataYmin -= dd;
dataYmax += dd;
} else {
if (dataYmin < 0)
dataYmin = 0.1;
dataYmax += dd;
}
}
}
@ -3289,6 +3313,11 @@ void PMusrCanvas::PlotData(Bool_t unzoom)
fMultiGraphData->Add(ge, "l");
}
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogX)
fDataTheoryPad->SetLogx(1);
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY)
fDataTheoryPad->SetLogy(1);
fMultiGraphData->Draw("a");
// set x/y-range
@ -3345,6 +3374,12 @@ void PMusrCanvas::PlotDifference(Bool_t unzoom)
{
fDataTheoryPad->cd();
// check if log scale plotting and if yes switch back to linear
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY)
fDataTheoryPad->SetLogy(0); // switch to linear
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogX)
fDataTheoryPad->SetLogx(0); // switch to linear
if (fPlotType < 0) // plot type not defined
return;
@ -3543,6 +3578,12 @@ void PMusrCanvas::PlotFourier(Bool_t unzoom)
{
fDataTheoryPad->cd();
// check if log scale plotting and if yes switch back to linear
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY)
fDataTheoryPad->SetLogy(0); // switch to linear
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogX)
fDataTheoryPad->SetLogx(0); // switch to linear
if (fPlotType < 0) // plot type not defined
return;
@ -3976,6 +4017,12 @@ void PMusrCanvas::PlotFourierDifference(Bool_t unzoom)
{
fDataTheoryPad->cd();
// check if log scale plotting and if yes switch back to linear
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogY)
fDataTheoryPad->SetLogy(0); // switch to linear
if (fMsrHandler->GetMsrPlotList()->at(fPlotNumber).fLogX)
fDataTheoryPad->SetLogx(0); // switch to linear
if (fPlotType < 0) // plot type not defined
return;

View File

@ -611,6 +611,8 @@ PRunData* PRunListCollection::GetMuMinus(UInt_t index, EDataSwitch tag)
cerr << endl;
return 0;
}
fRunMuMinusList[index]->CalcTheory();
data = fRunMuMinusList[index]->GetData();
break;
case kRunNo:
for (UInt_t i=0; i<fRunMuMinusList.size(); i++) {

View File

@ -59,11 +59,13 @@ PRunMuMinus::PRunMuMinus() : PRunBase()
*/
PRunMuMinus::PRunMuMinus(PMsrHandler *msrInfo, PRunDataHandler *rawData, UInt_t runNo, EPMusrHandleTag tag) : PRunBase(msrInfo, rawData, runNo, tag)
{
Bool_t success;
fNoOfFitBins = 0;
// calculate fFitData
if (success) {
success = PrepareData();
if (!PrepareData()) {
cerr << endl << ">> PRunMuMinus::PRunMuMinus: **SEVERE ERROR**: Couldn't prepare data for fitting!";
cerr << endl << ">> This is very bad :-(, will quit ...";
cerr << endl;
fValid = false;
}
}
@ -75,6 +77,7 @@ PRunMuMinus::PRunMuMinus(PMsrHandler *msrInfo, PRunDataHandler *rawData, UInt_t
*/
PRunMuMinus::~PRunMuMinus()
{
fForward.clear();
}
//--------------------------------------------------------------------------
@ -93,6 +96,42 @@ Double_t PRunMuMinus::CalcChiSquare(const std::vector<Double_t>& par)
Double_t chisq = 0.0;
Double_t diff = 0.0;
// calculate functions
for (Int_t i=0; i<fMsrInfo->GetNoOfFuncs(); i++) {
Int_t funcNo = fMsrInfo->GetFuncNo(i);
fFuncValues[i] = fMsrInfo->EvalFunc(funcNo, *fRunInfo->GetMap(), par);
}
// calculate chi square
Double_t time(1.0);
Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
// In order not to have an IF in the next loop, determine the start and end bins for the fit range now
Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
if (startTimeBin < 0)
startTimeBin = 0;
Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
if (endTimeBin > N)
endTimeBin = N;
// Calculate the theory function once to ensure one function evaluation for the current set of parameters.
// This is needed for the LF and user functions where some non-thread-save calculations only need to be calculated once
// for a given set of parameters---which should be done outside of the parallelized loop.
// For all other functions it means a tiny and acceptable overhead.
time = fTheory->Func(time, par, fFuncValues);
#ifdef HAVE_GOMP
Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
if (chunk < 10)
chunk = 10;
#pragma omp parallel for default(shared) private(i,time,diff) schedule(dynamic,chunk) reduction(+:chisq)
#endif
for (i=startTimeBin; i < endTimeBin; ++i) {
time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
diff = fData.GetValue()->at(i) - fTheory->Func(time, par, fFuncValues);
chisq += diff*diff / (fData.GetError()->at(i)*fData.GetError()->at(i));
}
return chisq;
}
@ -109,6 +148,47 @@ Double_t PRunMuMinus::CalcChiSquare(const std::vector<Double_t>& par)
*/
Double_t PRunMuMinus::CalcChiSquareExpected(const std::vector<Double_t>& par)
{
Double_t chisq = 0.0;
Double_t diff = 0.0;
Double_t theo = 0.0;
// calculate functions
for (Int_t i=0; i<fMsrInfo->GetNoOfFuncs(); i++) {
Int_t funcNo = fMsrInfo->GetFuncNo(i);
fFuncValues[i] = fMsrInfo->EvalFunc(funcNo, *fRunInfo->GetMap(), par);
}
// calculate chi square
Double_t time(1.0);
Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
// In order not to have an IF in the next loop, determine the start and end bins for the fit range now
Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
if (startTimeBin < 0)
startTimeBin = 0;
Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
if (endTimeBin > N)
endTimeBin = N;
// Calculate the theory function once to ensure one function evaluation for the current set of parameters.
// This is needed for the LF and user functions where some non-thread-save calculations only need to be calculated once
// for a given set of parameters---which should be done outside of the parallelized loop.
// For all other functions it means a tiny and acceptable overhead.
time = fTheory->Func(time, par, fFuncValues);
#ifdef HAVE_GOMP
Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
if (chunk < 10)
chunk = 10;
#pragma omp parallel for default(shared) private(i,time,diff) schedule(dynamic,chunk) reduction(+:chisq)
#endif
for (i=startTimeBin; i < endTimeBin; ++i) {
time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
theo = fTheory->Func(time, par, fFuncValues);
diff = fData.GetValue()->at(i) - theo;
chisq += diff*diff / theo;
}
return 0.0;
}
@ -125,9 +205,54 @@ Double_t PRunMuMinus::CalcChiSquareExpected(const std::vector<Double_t>& par)
*/
Double_t PRunMuMinus::CalcMaxLikelihood(const std::vector<Double_t>& par)
{
cout << endl << "PRunSingleHisto::CalcMaxLikelihood(): not implemented yet ..." << endl;
Double_t mllh = 0.0; // maximum log likelihood assuming poisson distribution for the single bin
return 1.0;
// calculate functions
for (Int_t i=0; i<fMsrInfo->GetNoOfFuncs(); i++) {
Int_t funcNo = fMsrInfo->GetFuncNo(i);
fFuncValues[i] = fMsrInfo->EvalFunc(funcNo, *fRunInfo->GetMap(), par);
}
// calculate maximum log likelihood
Double_t theo;
Double_t data;
Double_t time(1.0);
Int_t i, N(static_cast<Int_t>(fData.GetValue()->size()));
// In order not to have an IF in the next loop, determine the start and end bins for the fit range now
Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
if (startTimeBin < 0)
startTimeBin = 0;
Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
if (endTimeBin > N)
endTimeBin = N;
// Calculate the theory function once to ensure one function evaluation for the current set of parameters.
// This is needed for the LF and user functions where some non-thread-save calculations only need to be calculated once
// for a given set of parameters---which should be done outside of the parallelized loop.
// For all other functions it means a tiny and acceptable overhead.
time = fTheory->Func(time, par, fFuncValues);
#ifdef HAVE_GOMP
Int_t chunk = (endTimeBin - startTimeBin)/omp_get_num_procs();
if (chunk < 10)
chunk = 10;
#pragma omp parallel for default(shared) private(i,time,theo,data) schedule(dynamic,chunk) reduction(-:mllh)
#endif
for (i=startTimeBin; i < endTimeBin; ++i) {
time = fData.GetDataTimeStart() + (Double_t)i*fData.GetDataTimeStep();
// calculate theory for the given parameter set
theo = fTheory->Func(time, par, fFuncValues);
// check if data value is not too small
if (fData.GetValue()->at(i) > 1.0e-9)
data = fData.GetValue()->at(i);
else
data = 1.0e-9;
// add maximum log likelihood contribution of bin i
mllh -= data*TMath::Log(theo) - theo - TMath::LnGamma(data+1);
}
return mllh;
}
//--------------------------------------------------------------------------
@ -140,9 +265,33 @@ Double_t PRunMuMinus::CalcMaxLikelihood(const std::vector<Double_t>& par)
*/
UInt_t PRunMuMinus::GetNoOfFitBins()
{
CalcNoOfFitBins();
return fNoOfFitBins;
}
//--------------------------------------------------------------------------
// CalcNoOfFitBins (private)
//--------------------------------------------------------------------------
/**
* <p>Calculate the number of fitted bins for the current fit range.
*/
void PRunMuMinus::CalcNoOfFitBins()
{
// In order not having to loop over all bins and to stay consistent with the chisq method, calculate the start and end bins explicitly
Int_t startTimeBin = static_cast<Int_t>(ceil((fFitStartTime - fData.GetDataTimeStart())/fData.GetDataTimeStep()));
if (startTimeBin < 0)
startTimeBin = 0;
Int_t endTimeBin = static_cast<Int_t>(floor((fFitEndTime - fData.GetDataTimeStart())/fData.GetDataTimeStep())) + 1;
if (endTimeBin > static_cast<Int_t>(fData.GetValue()->size()))
endTimeBin = fData.GetValue()->size();
if (endTimeBin > startTimeBin)
fNoOfFitBins = endTimeBin - startTimeBin;
else
fNoOfFitBins = 0;
}
//--------------------------------------------------------------------------
// CalcTheory
//--------------------------------------------------------------------------
@ -151,13 +300,42 @@ UInt_t PRunMuMinus::GetNoOfFitBins()
*/
void PRunMuMinus::CalcTheory()
{
// feed the parameter vector
std::vector<Double_t> par;
PMsrParamList *paramList = fMsrInfo->GetMsrParamList();
for (UInt_t i=0; i<paramList->size(); i++)
par.push_back((*paramList)[i].fValue);
// calculate functions
for (Int_t i=0; i<fMsrInfo->GetNoOfFuncs(); i++) {
fFuncValues[i] = fMsrInfo->EvalFunc(fMsrInfo->GetFuncNo(i), *fRunInfo->GetMap(), par);
}
// calculate theory
UInt_t size = fData.GetValue()->size();
Double_t start = fData.GetDataTimeStart();
Double_t resolution = fData.GetDataTimeStep();
Double_t time;
for (UInt_t i=0; i<size; i++) {
time = start + (Double_t)i*resolution;
fData.AppendTheoryValue(fTheory->Func(time, par, fFuncValues));
}
// clean up
par.clear();
}
//--------------------------------------------------------------------------
// PrepareData
//--------------------------------------------------------------------------
/**
* <p>Prepare data for fitting or viewing. <b>(Not yet implemented)</b>
* <p>Prepare data for fitting or viewing. What is already processed at this stage:
* -# get proper raw run data
* -# get all needed forward histograms
* -# get time resolution
* -# get t0's and perform necessary cross checks (e.g. if t0 of msr-file (if present) are consistent with t0 of the data files, etc.)
* -# add runs (if addruns are present)
* -# group histograms (if grouping is present)
*
* <b>return:</b>
- true if everything went smoothly
@ -167,8 +345,414 @@ Bool_t PRunMuMinus::PrepareData()
{
Bool_t success = true;
cout << endl << "in PRunMuMinus::PrepareData(): will feed fData" << endl;
// get the proper run
PRawRunData* runData = fRawData->GetRunData(*fRunInfo->GetRunName());
if (!runData) { // couldn't get run
cerr << endl << ">> PRunSingleHisto::PrepareData(): **ERROR** Couldn't get run " << fRunInfo->GetRunName()->Data() << "!";
cerr << endl;
return false;
}
// collect histogram numbers
PUIntVector histoNo; // histoNo = msr-file forward + redGreen_offset - 1
for (UInt_t i=0; i<fRunInfo->GetForwardHistoNoSize(); i++) {
histoNo.push_back(fRunInfo->GetForwardHistoNo(i));
if (!runData->IsPresent(histoNo[i])) {
cerr << endl << ">> PRunSingleHisto::PrepareData(): **PANIC ERROR**:";
cerr << endl << ">> histoNo found = " << histoNo[i] << ", which is NOT present in the data file!?!?";
cerr << endl << ">> Will quit :-(";
cerr << endl;
histoNo.clear();
return false;
}
}
// feed all T0's
// first init T0's, T0's are stored as (forward T0, backward T0, etc.)
fT0s.clear();
fT0s.resize(histoNo.size());
for (UInt_t i=0; i<fT0s.size(); i++) {
fT0s[i] = -1.0;
}
// fill in the T0's from the msr-file (if present)
for (UInt_t i=0; i<fRunInfo->GetT0BinSize(); i++) {
fT0s[i] = fRunInfo->GetT0Bin(i);
}
// fill in the T0's from the data file, if not already present in the msr-file
for (UInt_t i=0; i<histoNo.size(); i++) {
if (fT0s[i] == -1.0) // i.e. not present in the msr-file, try the data file
if (runData->GetT0Bin(histoNo[i]) > 0.0) {
fT0s[i] = runData->GetT0Bin(histoNo[i]);
fRunInfo->SetT0Bin(fT0s[i], i); // keep value for the msr-file
}
}
// fill in the T0's gaps, i.e. in case the T0's are NOT in the msr-file and NOT in the data file
for (UInt_t i=0; i<histoNo.size(); i++) {
if (fT0s[i] == -1.0) { // i.e. not present in the msr-file and data file, use the estimated T0
fT0s[i] = runData->GetT0BinEstimated(histoNo[i]);
fRunInfo->SetT0Bin(fT0s[i], i); // keep value for the msr-file
cerr << endl << ">> PRunSingleHisto::PrepareData(): **WARRNING** NO t0's found, neither in the run data nor in the msr-file!";
cerr << endl << ">> run: " << fRunInfo->GetRunName();
cerr << endl << ">> will try the estimated one: forward t0 = " << runData->GetT0BinEstimated(histoNo[i]);
cerr << endl << ">> NO WARRANTY THAT THIS OK!! For instance for LEM this is almost for sure rubbish!";
cerr << endl;
}
}
// check if t0 is within proper bounds
for (UInt_t i=0; i<fRunInfo->GetForwardHistoNoSize(); i++) {
if ((fT0s[i] < 0) || (fT0s[i] > (Int_t)runData->GetDataBin(histoNo[i])->size())) {
cerr << endl << ">> PRunSingleHisto::PrepareData(): **ERROR** t0 data bin (" << fT0s[i] << ") doesn't make any sense!";
cerr << endl;
return false;
}
}
// keep the histo of each group at this point (addruns handled below)
vector<PDoubleVector> forward;
forward.resize(histoNo.size()); // resize to number of groups
for (UInt_t i=0; i<histoNo.size(); i++) {
forward[i].resize(runData->GetDataBin(histoNo[i])->size());
forward[i] = *runData->GetDataBin(histoNo[i]);
}
// check if there are runs to be added to the current one
if (fRunInfo->GetRunNameSize() > 1) { // runs to be added present
PRawRunData *addRunData;
for (UInt_t i=1; i<fRunInfo->GetRunNameSize(); i++) {
// get run to be added to the main one
addRunData = fRawData->GetRunData(*fRunInfo->GetRunName(i));
if (addRunData == 0) { // couldn't get run
cerr << endl << ">> PRunSingleHisto::PrepareData(): **ERROR** Couldn't get addrun " << fRunInfo->GetRunName(i)->Data() << "!";
cerr << endl;
return false;
}
// feed all T0's
// first init T0's, T0's are stored as (forward T0, backward T0, etc.)
PDoubleVector t0Add;
t0Add.resize(histoNo.size());
for (UInt_t j=0; j<t0Add.size(); j++) {
t0Add[j] = -1.0;
}
// fill in the T0's from the msr-file (if present)
for (UInt_t j=0; j<fRunInfo->GetT0BinSize(); j++) {
t0Add[j] = fRunInfo->GetAddT0Bin(i-1,j); // addRunIdx starts at 0
}
// fill in the T0's from the data file, if not already present in the msr-file
for (UInt_t j=0; j<histoNo.size(); j++) {
if (t0Add[j] == -1.0) // i.e. not present in the msr-file, try the data file
if (addRunData->GetT0Bin(histoNo[j]) > 0.0) {
t0Add[j] = addRunData->GetT0Bin(histoNo[j]);
fRunInfo->SetAddT0Bin(t0Add[j], i-1, j); // keep value for the msr-file
}
}
// fill in the T0's gaps, i.e. in case the T0's are NOT in the msr-file and NOT in the data file
for (UInt_t j=0; j<histoNo.size(); j++) {
if (t0Add[j] == -1.0) { // i.e. not present in the msr-file and data file, use the estimated T0
t0Add[j] = addRunData->GetT0BinEstimated(histoNo[j]);
fRunInfo->SetAddT0Bin(t0Add[j], i-1, j); // keep value for the msr-file
cerr << endl << ">> PRunSingleHisto::PrepareData(): **WARRNING** NO t0's found, neither in the run data nor in the msr-file!";
cerr << endl << ">> run: " << fRunInfo->GetRunName();
cerr << endl << ">> will try the estimated one: forward t0 = " << addRunData->GetT0BinEstimated(histoNo[j]);
cerr << endl << ">> NO WARRANTY THAT THIS OK!! For instance for LEM this is almost for sure rubbish!";
cerr << endl;
}
}
// check if t0 is within proper bounds
for (UInt_t j=0; j<fRunInfo->GetForwardHistoNoSize(); j++) {
if ((t0Add[j] < 0) || (t0Add[j] > (Int_t)addRunData->GetDataBin(histoNo[j])->size())) {
cerr << endl << ">> PRunSingleHisto::PrepareData(): **ERROR** addt0 data bin (" << t0Add[j] << ") doesn't make any sense!";
cerr << endl;
return false;
}
}
// add forward run
UInt_t addRunSize;
for (UInt_t k=0; k<histoNo.size(); k++) { // fill each group
addRunSize = addRunData->GetDataBin(histoNo[k])->size();
for (UInt_t j=0; j<addRunData->GetDataBin(histoNo[k])->size(); j++) { // loop over the bin indices
// make sure that the index stays in the proper range
if ((j+(Int_t)t0Add[k]-(Int_t)fT0s[k] >= 0) && (j+(Int_t)t0Add[k]-(Int_t)fT0s[k] < addRunSize)) {
forward[k][j] += addRunData->GetDataBin(histoNo[k])->at(j+(Int_t)t0Add[k]-(Int_t)fT0s[k]);
}
}
}
// clean up
t0Add.clear();
}
}
// set forward/backward histo data of the first group
fForward.resize(forward[0].size());
for (UInt_t i=0; i<fForward.size(); i++) {
fForward[i] = forward[0][i];
}
// group histograms, add all the remaining forward histograms of the group
for (UInt_t i=1; i<histoNo.size(); i++) { // loop over the groupings
for (UInt_t j=0; j<runData->GetDataBin(histoNo[i])->size(); j++) { // loop over the bin indices
// make sure that the index stays within proper range
if ((j+fT0s[i]-fT0s[0] >= 0) && (j+fT0s[i]-fT0s[0] < runData->GetDataBin(histoNo[i])->size())) {
fForward[j] += forward[i][j+(Int_t)fT0s[i]-(Int_t)fT0s[0]];
}
}
}
// keep the time resolution in (us)
fTimeResolution = runData->GetTimeResolution()/1.0e3;
cout.precision(10);
cout << endl << ">> PRunSingleHisto::PrepareData(): time resolution=" << fixed << runData->GetTimeResolution() << "(ns)" << endl;
if (fHandleTag == kFit)
success = PrepareFitData(runData, histoNo[0]);
else if (fHandleTag == kView)
success = PrepareRawViewData(runData, histoNo[0]);
else
success = false;
// cleanup
histoNo.clear();
return success;
}
//--------------------------------------------------------------------------
// PrepareFitData (private)
//--------------------------------------------------------------------------
/**
* <p>Take the pre-processed data (i.e. grouping and addrun are performed) and form the histogram for fitting.
* The following steps are performed:
* -# get fit start/stop time
* -# check that 'first good data bin', 'last good data bin', and 't0' make any sense
* -# packing (i.e. rebinning)
*
* <b>return:</b>
* - true, if everything went smooth
* - false, otherwise
*
* \param runData raw run data handler
* \param histoNo forward histogram number
*/
Bool_t PRunMuMinus::PrepareFitData(PRawRunData* runData, const UInt_t histoNo)
{
// transform raw histo data. This is done the following way (for details see the manual):
// for the single histo fit, just the rebinned raw data are copied
// first get start data, end data, and t0
Int_t start;
Int_t end;
start = fRunInfo->GetDataRange(0);
end = fRunInfo->GetDataRange(1);
// check if data range has been provided, and if not try to estimate them
if (start < 0) {
Int_t offset = (Int_t)(10.0e-3/fTimeResolution);
start = (Int_t)fT0s[0]+offset;
fRunInfo->SetDataRange(start, 0);
cerr << endl << ">> PRunSingleHisto::PrepareData(): **WARNING** data range was not provided, will try data range start = t0+" << offset << "(=10ns) = " << start << ".";
cerr << endl << ">> NO WARRANTY THAT THIS DOES MAKE ANY SENSE.";
cerr << endl;
}
if (end < 0) {
end = fForward.size();
fRunInfo->SetDataRange(end, 1);
cerr << endl << ">> PRunSingleHisto::PrepareData(): **WARNING** data range was not provided, will try data range end = " << end << ".";
cerr << endl << ">> NO WARRANTY THAT THIS DOES MAKE ANY SENSE.";
cerr << endl;
}
// check if start and end make any sense
// 1st check if start and end are in proper order
if (end < start) { // need to swap them
Int_t keep = end;
end = start;
start = keep;
}
// 2nd check if start is within proper bounds
if ((start < 0) || (start > (Int_t)fForward.size())) {
cerr << endl << ">> PRunSingleHisto::PrepareFitData(): **ERROR** start data bin doesn't make any sense!";
cerr << endl;
return false;
}
// 3rd check if end is within proper bounds
if ((end < 0) || (end > (Int_t)fForward.size())) {
cerr << endl << ">> PRunSingleHisto::PrepareFitData(): **ERROR** end data bin doesn't make any sense!";
cerr << endl;
return false;
}
// everything looks fine, hence fill data set
Int_t t0 = (Int_t)fT0s[0];
Double_t value = 0.0;
// data start at data_start-t0
// time shifted so that packing is included correctly, i.e. t0 == t0 after packing
fData.SetDataTimeStart(fTimeResolution*((Double_t)start-(Double_t)t0+(Double_t)(fRunInfo->GetPacking()-1)/2.0));
fData.SetDataTimeStep(fTimeResolution*fRunInfo->GetPacking());
for (Int_t i=start; i<end; i++) {
if (fRunInfo->GetPacking() == 1) {
value = fForward[i];
fData.AppendValue(value);
if (value == 0.0)
fData.AppendErrorValue(1.0);
else
fData.AppendErrorValue(TMath::Sqrt(value));
} else { // packed data, i.e. fRunInfo->GetPacking() > 1
if (((i-start) % fRunInfo->GetPacking() == 0) && (i != start)) { // fill data
fData.AppendValue(value);
if (value == 0.0)
fData.AppendErrorValue(1.0);
else
fData.AppendErrorValue(TMath::Sqrt(value));
// reset values
value = 0.0;
}
value += fForward[i];
}
}
CalcNoOfFitBins();
return true;
}
//--------------------------------------------------------------------------
// PrepareRawViewData (private)
//--------------------------------------------------------------------------
/**
* <p>Take the pre-processed data (i.e. grouping and addrun are performed) and form the histogram for viewing
* without any life time correction.
* <p>The following steps are performed:
* -# check if view packing is wished.
* -# check that 'first good data bin', 'last good data bin', and 't0' makes any sense
* -# packing (i.e. rebinning)
* -# calculate theory
*
* <b>return:</b>
* - true, if everything went smooth
* - false, otherwise.
*
* \param runData raw run data handler
* \param histoNo forward histogram number
*/
Bool_t PRunMuMinus::PrepareRawViewData(PRawRunData* runData, const UInt_t histoNo)
{
// check if view_packing is wished
Int_t packing = fRunInfo->GetPacking();
if (fMsrInfo->GetMsrPlotList()->at(0).fViewPacking > 0) {
packing = fMsrInfo->GetMsrPlotList()->at(0).fViewPacking;
}
// calculate necessary norms
Double_t theoryNorm = 1.0;
if (fMsrInfo->GetMsrPlotList()->at(0).fViewPacking > 0) {
theoryNorm = (Double_t)fMsrInfo->GetMsrPlotList()->at(0).fViewPacking/(Double_t)fRunInfo->GetPacking();
}
// raw data, since PMusrCanvas is doing ranging etc.
// start = the first bin which is a multiple of packing backward from first good data bin
Int_t start = fRunInfo->GetDataRange(0) - (fRunInfo->GetDataRange(0)/packing)*packing;
// end = last bin starting from start which is a multipl of packing and still within the data
Int_t end = start + ((fForward.size()-start)/packing)*packing;
// check if data range has been provided, and if not try to estimate them
if (start < 0) {
Int_t offset = (Int_t)(10.0e-3/fTimeResolution);
start = ((Int_t)fT0s[0]+offset) - (((Int_t)fT0s[0]+offset)/packing)*packing;
end = start + ((fForward.size()-start)/packing)*packing;
cerr << endl << ">> PRunSingleHisto::PrepareData(): **WARNING** data range was not provided, will try data range start = " << start << ".";
cerr << endl << ">> NO WARRANTY THAT THIS DOES MAKE ANY SENSE.";
cerr << endl;
}
// check if start, end, and t0 make any sense
// 1st check if start and end are in proper order
if (end < start) { // need to swap them
Int_t keep = end;
end = start;
start = keep;
}
// 2nd check if start is within proper bounds
if ((start < 0) || (start > (Int_t)fForward.size())) {
cerr << endl << ">> PRunSingleHisto::PrepareRawViewData(): **ERROR** start data bin doesn't make any sense!";
cerr << endl;
return false;
}
// 3rd check if end is within proper bounds
if ((end < 0) || (end > (Int_t)fForward.size())) {
cerr << endl << ">> PRunSingleHisto::PrepareRawViewData(): **ERROR** end data bin doesn't make any sense!";
cerr << endl;
return false;
}
// everything looks fine, hence fill data set
Int_t t0 = (Int_t)fT0s[0];
Double_t value = 0.0;
// data start at data_start-t0
// time shifted so that packing is included correctly, i.e. t0 == t0 after packing
fData.SetDataTimeStart(fTimeResolution*((Double_t)start-(Double_t)t0+(Double_t)(packing-1)/2.0));
fData.SetDataTimeStep(fTimeResolution*packing);
for (Int_t i=start; i<end; i++) {
if (((i-start) % packing == 0) && (i != start)) { // fill data
fData.AppendValue(value);
if (value == 0.0)
fData.AppendErrorValue(1.0);
else
fData.AppendErrorValue(TMath::Sqrt(value));
// reset values
value = 0.0;
}
value += fForward[i];
}
CalcNoOfFitBins();
// fill theory vector for kView
// feed the parameter vector
std::vector<Double_t> par;
PMsrParamList *paramList = fMsrInfo->GetMsrParamList();
for (UInt_t i=0; i<paramList->size(); i++)
par.push_back((*paramList)[i].fValue);
// calculate functions
for (Int_t i=0; i<fMsrInfo->GetNoOfFuncs(); i++) {
fFuncValues[i] = fMsrInfo->EvalFunc(fMsrInfo->GetFuncNo(i), *fRunInfo->GetMap(), par);
}
// calculate theory
UInt_t size = fForward.size();
Double_t factor = 1.0;
if (fData.GetValue()->size() * 10 > fForward.size()) {
size = fData.GetValue()->size() * 10;
factor = (Double_t)fForward.size() / (Double_t)size;
}
Double_t time;
Double_t theoryValue;
fData.SetTheoryTimeStart(fData.GetDataTimeStart());
fData.SetTheoryTimeStep(fTimeResolution*factor);
for (UInt_t i=0; i<size; i++) {
time = fData.GetTheoryTimeStart() + i*fData.GetTheoryTimeStep();
theoryValue = fTheory->Func(time, par, fFuncValues);
if (fabs(theoryValue) > 1.0e10) { // dirty hack needs to be fixed!!
theoryValue = 0.0;
}
fData.AppendTheoryValue(theoryNorm*theoryValue);
}
// clean up
par.clear();
return true;
}

View File

@ -74,6 +74,7 @@ PRunSingleHisto::PRunSingleHisto() : PRunBase()
PRunSingleHisto::PRunSingleHisto(PMsrHandler *msrInfo, PRunDataHandler *rawData, UInt_t runNo, EPMusrHandleTag tag) : PRunBase(msrInfo, rawData, runNo, tag)
{
fScaleN0AndBkg = IsScaleN0AndBkg();
fNoOfFitBins = 0;
if (!PrepareData()) {
cerr << endl << ">> PRunSingleHisto::PRunSingleHisto: **SEVERE ERROR**: Couldn't prepare data for fitting!";

View File

@ -397,6 +397,10 @@ Double_t PTheory::Func(register Double_t t, const PDoubleVector& paramValues, co
if (fMul) {
if (fAdd) { // fMul != 0 && fAdd != 0
switch (fType) {
case THEORY_CONST:
return Constant(paramValues, funcValues) * fMul->Func(t, paramValues, funcValues) +
fAdd->Func(t, paramValues, funcValues);
break;
case THEORY_ASYMMETRY:
return Asymmetry(paramValues, funcValues) * fMul->Func(t, paramValues, funcValues) +
fAdd->Func(t, paramValues, funcValues);
@ -488,6 +492,9 @@ Double_t PTheory::Func(register Double_t t, const PDoubleVector& paramValues, co
}
} else { // fMul !=0 && fAdd == 0
switch (fType) {
case THEORY_CONST:
return Constant(paramValues, funcValues) * fMul->Func(t, paramValues, funcValues);
break;
case THEORY_ASYMMETRY:
return Asymmetry(paramValues, funcValues) * fMul->Func(t, paramValues, funcValues);
break;
@ -560,6 +567,9 @@ Double_t PTheory::Func(register Double_t t, const PDoubleVector& paramValues, co
} else { // fMul == 0 && fAdd != 0
if (fAdd) {
switch (fType) {
case THEORY_CONST:
return Constant(paramValues, funcValues) + fAdd->Func(t, paramValues, funcValues);
break;
case THEORY_ASYMMETRY:
return Asymmetry(paramValues, funcValues) + fAdd->Func(t, paramValues, funcValues);
break;
@ -630,6 +640,9 @@ Double_t PTheory::Func(register Double_t t, const PDoubleVector& paramValues, co
}
} else { // fMul == 0 && fAdd == 0
switch (fType) {
case THEORY_CONST:
return Constant(paramValues, funcValues);
break;
case THEORY_ASYMMETRY:
return Asymmetry(paramValues, funcValues);
break;
@ -981,6 +994,34 @@ void PTheory::MakeCleanAndTidyUserFcn(UInt_t i, PMsrLines *fullTheoryBlock)
}
}
//--------------------------------------------------------------------------
/**
 * <p> theory function: Const
 * \f[ = const \f]
 *
 * <b>meaning of paramValues:</b> const
 *
 * <b>return:</b> function value
 *
 * \param paramValues vector with the parameters
 * \param funcValues vector with the functions (i.e. functions of the parameters)
 */
Double_t PTheory::Constant(const PDoubleVector& paramValues, const PDoubleVector& funcValues) const
{
  // expected parameters: const
  // the single parameter is either a direct fit parameter (or resolved map),
  // or - if its index lies above MSR_PARAM_FUN_OFFSET - a FUNCTIONS entry
  return (fParamNo[0] < MSR_PARAM_FUN_OFFSET) ?
           paramValues[fParamNo[0]] :
           funcValues[fParamNo[0]-MSR_PARAM_FUN_OFFSET];
}
//--------------------------------------------------------------------------
/**
* <p> theory function: Asymmetry

View File

@ -58,13 +58,14 @@ class PFunctionHandler
virtual Int_t GetFuncNo(UInt_t idx);
virtual Int_t GetFuncIndex(Int_t funcNo);
virtual UInt_t GetNoOfFuncs() { return fFuncs.size(); }
virtual TString* GetFuncString(UInt_t idx);
virtual TString GetFuncString(UInt_t idx);
private:
Bool_t fValid; ///< true = function handler has valid functions
PMsrLines fLines; ///< stores the msr-file FUNCTIONS block as clear text.
vector<PFunction> fFuncs; ///< vector of all evaluatable functions
vector<TString> fFuncComment; ///< vector of prepended function comments
};
#endif // _PFUNCTIONHANDLER_H_

View File

@ -38,7 +38,7 @@ using namespace std;
#include <TString.h>
#define PMUSR_VERSION "0.10.0"
#define PMUSR_VERSION "0.11.0"
#define PMUSR_SUCCESS 0
#define PMUSR_WRONG_STARTUP_SYNTAX -1

View File

@ -52,10 +52,15 @@ class PRunMuMinus : public PRunBase
virtual UInt_t GetNoOfFitBins();
protected:
virtual void CalcNoOfFitBins();
virtual Bool_t PrepareData();
virtual Bool_t PrepareFitData(PRawRunData* runData, const UInt_t histoNo);
virtual Bool_t PrepareRawViewData(PRawRunData* runData, const UInt_t histoNo);
private:
UInt_t fNoOfFitBins; ///< number of bins to be fitted
PDoubleVector fForward; ///< forward histo data
};
#endif // _PRUNMUMINUS_H_

View File

@ -45,31 +45,33 @@
// function tags
#define THEORY_UNDEFINED -1
#define THEORY_ASYMMETRY 0
#define THEORY_SIMPLE_EXP 1
#define THEORY_GENERAL_EXP 2
#define THEORY_SIMPLE_GAUSS 3
#define THEORY_STATIC_GAUSS_KT 4
#define THEORY_STATIC_GAUSS_KT_LF 5
#define THEORY_DYNAMIC_GAUSS_KT_LF 6
#define THEORY_STATIC_LORENTZ_KT 7
#define THEORY_STATIC_LORENTZ_KT_LF 8
#define THEORY_DYNAMIC_LORENTZ_KT_LF 9
#define THEORY_COMBI_LGKT 10
#define THEORY_SPIN_GLASS 11
#define THEORY_RANDOM_ANISOTROPIC_HYPERFINE 12
#define THEORY_ABRAGAM 13
#define THEORY_INTERNAL_FIELD 14
#define THEORY_TF_COS 15
#define THEORY_BESSEL 16
#define THEORY_INTERNAL_BESSEL 17
#define THEORY_SKEWED_GAUSS 18
#define THEORY_POLYNOM 19
#define THEORY_USER_FCN 20
#define THEORY_CONST 0
#define THEORY_ASYMMETRY 1
#define THEORY_SIMPLE_EXP 2
#define THEORY_GENERAL_EXP 3
#define THEORY_SIMPLE_GAUSS 4
#define THEORY_STATIC_GAUSS_KT 5
#define THEORY_STATIC_GAUSS_KT_LF 6
#define THEORY_DYNAMIC_GAUSS_KT_LF 7
#define THEORY_STATIC_LORENTZ_KT 8
#define THEORY_STATIC_LORENTZ_KT_LF 9
#define THEORY_DYNAMIC_LORENTZ_KT_LF 10
#define THEORY_COMBI_LGKT 11
#define THEORY_SPIN_GLASS 12
#define THEORY_RANDOM_ANISOTROPIC_HYPERFINE 13
#define THEORY_ABRAGAM 14
#define THEORY_INTERNAL_FIELD 15
#define THEORY_TF_COS 16
#define THEORY_BESSEL 17
#define THEORY_INTERNAL_BESSEL 18
#define THEORY_SKEWED_GAUSS 19
#define THEORY_POLYNOM 20
#define THEORY_USER_FCN 21
// function parameter tags, i.e. how many parameters has a specific function
// if there is a comment with a (tshift), the number of parameters is increased by one
#define THEORY_PARAM_ASYMMETRY 1 // asymetry
#define THEORY_PARAM_CONST 1 // const
#define THEORY_PARAM_ASYMMETRY 1 // asymmetry
#define THEORY_PARAM_SIMPLE_EXP 1 // damping (tshift)
#define THEORY_PARAM_GENERAL_EXP 2 // damping, exponents (tshift)
#define THEORY_PARAM_SIMPLE_GAUSS 1 // damping (tshift)
@ -90,7 +92,7 @@
#define THEORY_PARAM_SKEWED_GAUSS 4 // phase, frequency, rate minus, rate plus (tshift)
// number of available user functions
#define THEORY_MAX 21
#define THEORY_MAX 22
// maximal number of parameters. Needed in the contents of LF
#define THEORY_MAX_PARAM 10
@ -122,68 +124,72 @@ typedef struct theo_data_base {
*/
static PTheoDataBase fgTheoDataBase[THEORY_MAX] = {
{THEORY_ASYMMETRY, THEORY_PARAM_ASYMMETRY, false,
"asymmetry", "a", "", ""},
{THEORY_CONST, THEORY_PARAM_CONST, false,
"const", "c", "", ""},
{THEORY_SIMPLE_EXP, THEORY_PARAM_SIMPLE_EXP, false,
"simplExpo", "se", "(rate)", "(rate tshift)"},
{THEORY_ASYMMETRY, THEORY_PARAM_ASYMMETRY, false,
"asymmetry", "a", "", ""},
{THEORY_GENERAL_EXP, THEORY_PARAM_GENERAL_EXP, false,
"generExpo", "ge", "(rate exponent)", "(rate exponent tshift)"},
{THEORY_SIMPLE_EXP, THEORY_PARAM_SIMPLE_EXP, false,
"simplExpo", "se", "(rate)", "(rate tshift)"},
{THEORY_SIMPLE_GAUSS, THEORY_PARAM_SIMPLE_GAUSS, false,
"simpleGss", "sg", "(rate)", "(rate tshift)"},
{THEORY_GENERAL_EXP, THEORY_PARAM_GENERAL_EXP, false,
"generExpo", "ge", "(rate exponent)", "(rate exponent tshift)"},
{THEORY_STATIC_GAUSS_KT, THEORY_PARAM_STATIC_GAUSS_KT, false,
"statGssKt", "stg", "(rate)", "(rate tshift)"},
{THEORY_SIMPLE_GAUSS, THEORY_PARAM_SIMPLE_GAUSS, false,
"simpleGss", "sg", "(rate)", "(rate tshift)"},
{THEORY_STATIC_GAUSS_KT_LF, THEORY_PARAM_STATIC_GAUSS_KT_LF, true,
"statGssKTLF", "sgktlf", "(frequency damping)", "(frequency damping tshift)"},
{THEORY_STATIC_GAUSS_KT, THEORY_PARAM_STATIC_GAUSS_KT, false,
"statGssKt", "stg", "(rate)", "(rate tshift)"},
{THEORY_DYNAMIC_GAUSS_KT_LF, THEORY_PARAM_DYNAMIC_GAUSS_KT_LF, true,
"dynGssKTLF", "dgktlf", "(frequency damping hopping-rate)", "(frequency damping hopping-rate tshift)"},
{THEORY_STATIC_GAUSS_KT_LF, THEORY_PARAM_STATIC_GAUSS_KT_LF, true,
"statGssKTLF", "sgktlf", "(frequency damping)", "(frequency damping tshift)"},
{THEORY_STATIC_LORENTZ_KT, THEORY_PARAM_STATIC_LORENTZ_KT, true,
"statExpKT", "sekt", "(rate)", "(rate tshift)"},
{THEORY_DYNAMIC_GAUSS_KT_LF, THEORY_PARAM_DYNAMIC_GAUSS_KT_LF, true,
"dynGssKTLF", "dgktlf", "(frequency damping hopping-rate)", "(frequency damping hopping-rate tshift)"},
{THEORY_STATIC_LORENTZ_KT_LF, THEORY_PARAM_STATIC_LORENTZ_KT_LF, true,
"statExpKTLF", "sektlf", "(frequency damping)", "(frequency damping tshift)"},
{THEORY_STATIC_LORENTZ_KT, THEORY_PARAM_STATIC_LORENTZ_KT, true,
"statExpKT", "sekt", "(rate)", "(rate tshift)"},
{THEORY_DYNAMIC_LORENTZ_KT_LF, THEORY_PARAM_DYNAMIC_LORENTZ_KT_LF, true,
"dynExpKTLF", "dektlf", "(frequency damping hopping-rate)", "(frequency damping hopping-rate tshift)"},
{THEORY_STATIC_LORENTZ_KT_LF, THEORY_PARAM_STATIC_LORENTZ_KT_LF, true,
"statExpKTLF", "sektlf", "(frequency damping)", "(frequency damping tshift)"},
{THEORY_COMBI_LGKT, THEORY_PARAM_COMBI_LGKT, false,
"combiLGKT", "lgkt", "(LorentzRate GaussRate)", "(LorentzRate GaussRate tshift)"},
{THEORY_DYNAMIC_LORENTZ_KT_LF, THEORY_PARAM_DYNAMIC_LORENTZ_KT_LF, true,
"dynExpKTLF", "dektlf", "(frequency damping hopping-rate)", "(frequency damping hopping-rate tshift)"},
{THEORY_SPIN_GLASS, THEORY_PARAM_SPIN_GLASS, false,
"spinGlass", "spg", "(rate hopprate order)", "(rate hopprate order tshift)"},
{THEORY_COMBI_LGKT, THEORY_PARAM_COMBI_LGKT, false,
"combiLGKT", "lgkt", "(LorentzRate GaussRate)", "(LorentzRate GaussRate tshift)"},
{THEORY_RANDOM_ANISOTROPIC_HYPERFINE, THEORY_PARAM_RANDOM_ANISOTROPIC_HYPERFINE, false,
"rdAnisoHf", "rahf", "(frequency rate)", "(frequency rate tshift)"},
{THEORY_SPIN_GLASS, THEORY_PARAM_SPIN_GLASS, false,
"spinGlass", "spg", "(rate hopprate order)", "(rate hopprate order tshift)"},
{THEORY_ABRAGAM, THEORY_PARAM_ABRAGAM, false,
"abragam", "ab", "(rate hopprate)", "(rate hopprate tshift)"},
{THEORY_RANDOM_ANISOTROPIC_HYPERFINE, THEORY_PARAM_RANDOM_ANISOTROPIC_HYPERFINE, false,
"rdAnisoHf", "rahf", "(frequency rate)", "(frequency rate tshift)"},
{THEORY_INTERNAL_FIELD, THEORY_PARAM_INTERNAL_FIELD, false,
"internFld", "if", "(fraction phase frequency Trate Lrate)", "(fraction phase frequency Trate Lrate tshift)"},
{THEORY_ABRAGAM, THEORY_PARAM_ABRAGAM, false,
"abragam", "ab", "(rate hopprate)", "(rate hopprate tshift)"},
{THEORY_TF_COS, THEORY_PARAM_TF_COS, false,
"TFieldCos", "tf", "(phase frequency)", "(phase frequency tshift)"},
{THEORY_INTERNAL_FIELD, THEORY_PARAM_INTERNAL_FIELD, false,
"internFld", "if", "(fraction phase frequency Trate Lrate)", "(fraction phase frequency Trate Lrate tshift)"},
{THEORY_BESSEL, THEORY_PARAM_BESSEL, false,
"bessel", "b", "(phase frequency)", "(phase frequency tshift)"},
{THEORY_TF_COS, THEORY_PARAM_TF_COS, false,
"TFieldCos", "tf", "(phase frequency)", "(phase frequency tshift)"},
{THEORY_INTERNAL_BESSEL, THEORY_PARAM_INTERNAL_BESSEL, false,
"internBsl", "ib", "(fraction phase frequency Trate Lrate)", "(fraction phase frequency Trate Lrate tshift)"},
{THEORY_BESSEL, THEORY_PARAM_BESSEL, false,
"bessel", "b", "(phase frequency)", "(phase frequency tshift)"},
{THEORY_SKEWED_GAUSS, THEORY_PARAM_SKEWED_GAUSS, false,
"skewedGss", "skg", "(phase frequency rate_m rate_p)", "(phase frequency rate_m rate_p tshift)"},
{THEORY_INTERNAL_BESSEL, THEORY_PARAM_INTERNAL_BESSEL, false,
"internBsl", "ib", "(fraction phase frequency Trate Lrate)", "(fraction phase frequency Trate Lrate tshift)"},
{THEORY_POLYNOM, 0, false,
"polynom", "p", "(tshift p0 p1 ... pn)", "(tshift p0 p1 ... pn)"},
{THEORY_SKEWED_GAUSS, THEORY_PARAM_SKEWED_GAUSS, false,
"skewedGss", "skg", "(phase frequency rate_m rate_p)", "(phase frequency rate_m rate_p tshift)"},
{THEORY_USER_FCN, 0, false,
"userFcn", "u", "", ""}};
{THEORY_POLYNOM, 0, false,
"polynom", "p", "(tshift p0 p1 ... pn)", "(tshift p0 p1 ... pn)"},
{THEORY_USER_FCN, 0, false,
"userFcn", "u", "", ""}
};
//--------------------------------------------------------------------------------------
/**
@ -206,6 +212,7 @@ class PTheory
virtual void MakeCleanAndTidyPolynom(UInt_t i, PMsrLines* fullTheoryBlock);
virtual void MakeCleanAndTidyUserFcn(UInt_t i, PMsrLines* fullTheoryBlock);
virtual Double_t Constant(const PDoubleVector& paramValues, const PDoubleVector& funcValues) const;
virtual Double_t Asymmetry(const PDoubleVector& paramValues, const PDoubleVector& funcValues) const;
virtual Double_t SimpleExp(register Double_t t, const PDoubleVector& paramValues, const PDoubleVector& funcValues) const;
virtual Double_t GeneralExp(register Double_t t, const PDoubleVector& paramValues, const PDoubleVector& funcValues) const;

View File

@ -146,7 +146,7 @@ void PFitOutputHandler::readFromStdErr()
*/
void PFitOutputHandler::processDone(int exitCode, QProcess::ExitStatus exitStatus)
{
if (exitStatus == QProcess::CrashExit)
if ((exitStatus == QProcess::CrashExit) && (exitCode != 0))
qDebug() << "**ERROR** PFitOutputHandler::processDone: exitCode = " << exitCode << endl;
fQuitButton->setText("Done");
}
@ -161,8 +161,9 @@ void PFitOutputHandler::quitButtonPressed()
// if the fitting is still taking place, kill it
if (fProc->state() == QProcess::Running) {
fProc->terminate();
if (!fProc->waitForFinished())
if (!fProc->waitForFinished()) {
fProc->kill();
}
}
accept();

View File

@ -196,7 +196,7 @@ void musrfit_dump_ascii(char *fileName, PRunListCollection *runList)
}
}
// rrf
// muMinus
size = runList->GetNoOfMuMinus();
if (size > 0) {
for (unsigned int i=0; i<size; i++) {
@ -329,7 +329,7 @@ void musrfit_dump_root(char *fileName, PRunListCollection *runList)
}
}
// rrf
// muMinus
size = runList->GetNoOfMuMinus();
if (size > 0) {
for (unsigned int i=0; i<size; i++) {