gnuastro-commits
From: Mohammad Akhlaghi
Subject: [gnuastro-commits] master e11098d 053/113: Recent work in master imported, minor conflicts fixed
Date: Fri, 16 Apr 2021 10:33:44 -0400 (EDT)

branch: master
commit e11098d3d164bb6a567f1f96ebf00961f9c8359f
Merge: 5831e9e d24fabe
Author: Mohammad Akhlaghi <mohammad@akhlaghi.org>
Commit: Mohammad Akhlaghi <mohammad@akhlaghi.org>

    Recent work in master imported, minor conflicts fixed
    
    Some minor conflicts existed in NoiseChisel; they have now been corrected.
---
 NEWS                           |  22 +++-
 THANKS                         |   3 +
 bin/mkcatalog/args.h           |  42 +++++++
 bin/mkcatalog/columns.c        | 114 +++++++++++++----
 bin/mkcatalog/main.h           |   8 +-
 bin/mkcatalog/mkcatalog.c      |   2 +-
 bin/mkcatalog/mkcatalog.h      |   4 +-
 bin/mkcatalog/ui.h             |   3 +
 bin/mkcatalog/upperlimit.c     | 136 +++++++++++++++++----
 bin/noisechisel/clumps.c       |  43 +++----
 bin/noisechisel/detection.c    | 240 ++++++++++++++++++++----------------
 bin/noisechisel/main.h         |   4 +-
 bin/noisechisel/noisechisel.c  |  42 ++++---
 bin/noisechisel/segmentation.c |   3 +-
 bin/noisechisel/sky.c          |  43 ++++---
 bin/noisechisel/ui.c           |  24 ++--
 bin/noisechisel/ui.h           |   7 ++
 doc/announce-acknowledge.txt   |   3 +
 doc/gnuastro.texi              | 270 ++++++++++++++++++++++++-----------------
 lib/fits.c                     |  19 ++-
 lib/statistics.c               |  88 +++++++++++---
 21 files changed, 757 insertions(+), 363 deletions(-)

diff --git a/NEWS b/NEWS
index 7e83d09..0fd4fd6 100644
--- a/NEWS
+++ b/NEWS
@@ -5,17 +5,31 @@ GNU Astronomy Utilities NEWS                          -*- outline -*-
 
 ** New features
 
+  MakeCatalog: The new `--mean' and `--median' options will calculate the
+  mean and median pixel value within an object or clump respectively.
+
+  MakeCatalog: The new `--upperlimitsigma' and `--upperlimitquantile'
+  columns will report the position of the object's brightness with respect
+  to the distribution of randomly measured values: the former as a multiple
+  of sigma and the latter as a quantile. Also, `--upperlimitonesigma' will
+  return the 1-sigma value of the randomly placed upper limit magnitudes
+  (irrespective of the value given to `--upnsigma').
+
+  NoiseChisel: a value of `none' to the `--kernel' option will disable
+  convolution.
+
   Statistics: the new `--manualbinrange' allows the bins in histograms or
   cumulative frequency plots to be set outside the minimum or maximum
   values of the dataset.
 
-  MakeCatalog: The new `--mean' and `--median' options will calculate the
-  mean and median pixel value within an object or clump respectively.
-
 ** Removed features
 
 ** Changed features
 
+  Libraries: `gal_statistics_quantile_function' returns `inf' or `-inf' if
+  the given value is smaller than the minimum or larger than the maximum of
+  the input dataset's range.
+
 ** Bug fixes
 
   Many unused result warnings for asprintf in some compilers (bug #52979).
@@ -30,7 +44,9 @@ GNU Astronomy Utilities NEWS                          -*- outline -*-
 
   Statistics program bad results on integer columns with limits (bug #53230).
 
+  NoiseChisel crash when no growth is possible (bug #53268).
 
+  MakeCatalog parses area larger than clump (bug #53295).
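
The new `--upperlimitonesigma', `--upperlimitsigma' and `--upperlimitquantile'
columns announced above can be illustrated with a minimal standalone sketch
(not MakeCatalog's implementation, and skipping the sigma-clipping that
MakeCatalog applies to the random distribution; compile with something like
`gcc upperlimit-sketch.c -lm'):

    /* Toy example: `--upperlimitonesigma' is the scatter of sums measured
       over randomly placed apertures, `--upperlimitsigma' is the object's
       sum as a multiple of that scatter, and `--upperlimitquantile' is
       (roughly) the fraction of random measurements below the object's
       sum. */
    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
      size_t i, num=8, below=0;
      double rand_sums[8]={-1.2, 0.4, 2.1, -0.8, 1.5, 0.9, -0.3, 1.1};
      double object_sum=3.0, mean=0.0, std=0.0;

      /* Mean and standard deviation of the random measurements. */
      for(i=0;i<num;++i) mean+=rand_sums[i];
      mean/=num;
      for(i=0;i<num;++i) std+=(rand_sums[i]-mean)*(rand_sums[i]-mean);
      std=sqrt(std/num);

      /* How many random measurements fall below the object's sum? */
      for(i=0;i<num;++i) if(rand_sums[i]<object_sum) ++below;

      printf("one sigma:      %g\n", std);               /* --upperlimitonesigma */
      printf("sigma multiple: %g\n", object_sum/std);    /* --upperlimitsigma    */
      printf("quantile:       %g\n", (double)below/num); /* --upperlimitquantile */
      return 0;
    }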
 
 
 
diff --git a/THANKS b/THANKS
index e9a72b9..7ae2873 100644
--- a/THANKS
+++ b/THANKS
@@ -24,6 +24,7 @@ support in Gnuastro. The list is ordered alphabetically (by family name).
     Adrian Bunk                          bunk@debian.org
     Rosa Calvi                           rcalvi@iac.es
     Benjamin Clement                     benjamin.clement@univ-lyon1.fr
+    Nima Dehdilani                       nimadehdilani@gmail.com
     Antonio Diaz Diaz                    antonio@gnu.org
     Thérèse Godefroy                     godef.th@free.fr
     Madusha Gunawardhana                 gunawardhana@strw.leidenuniv.nl
@@ -51,7 +52,9 @@ support in Gnuastro. The list is ordered alphabetically (by family name).
     Éric Thiébaut                        eric.thiebaut@univ-lyon1.fr
     Ignacio Trujillo                     trujillo@iac.es
     David Valls-Gabaud                   david.valls-gabaud@obspm.fr
+    Aaron Watkins                        aaron.watkins@oulu.fi
     Christopher Willmer                  cnaw@as.arizona.edu
+    Sara Yousefi Taemeh                  s.yousefi.t@gmail.com
 
 
 Teams
diff --git a/bin/mkcatalog/args.h b/bin/mkcatalog/args.h
index 5602b8a..e5be022 100644
--- a/bin/mkcatalog/args.h
+++ b/bin/mkcatalog/args.h
@@ -913,6 +913,48 @@ struct argp_option program_options[] =
       ui_column_codes_ll
     },
     {
+      "upperlimitonesigma",
+      UI_KEY_UPPERLIMITONESIGMA,
+      0,
+      0,
+      "Upper-limit one sigma value.",
+      UI_GROUP_COLUMNS_BRIGHTNESS,
+      0,
+      GAL_TYPE_INVALID,
+      GAL_OPTIONS_RANGE_ANY,
+      GAL_OPTIONS_NOT_MANDATORY,
+      GAL_OPTIONS_NOT_SET,
+      ui_column_codes_ll
+    },
+    {
+      "upperlimitsigma",
+      UI_KEY_UPPERLIMITSIGMA,
+      0,
+      0,
+      "Place in upperlimit distribution (sigma multiple).",
+      UI_GROUP_COLUMNS_BRIGHTNESS,
+      0,
+      GAL_TYPE_INVALID,
+      GAL_OPTIONS_RANGE_ANY,
+      GAL_OPTIONS_NOT_MANDATORY,
+      GAL_OPTIONS_NOT_SET,
+      ui_column_codes_ll
+    },
+    {
+      "upperlimitquantile",
+      UI_KEY_UPPERLIMITQUANTILE,
+      0,
+      0,
+      "Quantile in random distribution (max 1).",
+      UI_GROUP_COLUMNS_BRIGHTNESS,
+      0,
+      GAL_TYPE_INVALID,
+      GAL_OPTIONS_RANGE_ANY,
+      GAL_OPTIONS_NOT_MANDATORY,
+      GAL_OPTIONS_NOT_SET,
+      ui_column_codes_ll
+    },
+    {
       "riverave",
       UI_KEY_RIVERAVE,
       0,
diff --git a/bin/mkcatalog/columns.c b/bin/mkcatalog/columns.c
index 035acae..6ed9670 100644
--- a/bin/mkcatalog/columns.c
+++ b/bin/mkcatalog/columns.c
@@ -334,9 +334,9 @@ columns_define_alloc(struct mkcatalogparams *p)
      smaller domain of raw measurements. So to avoid having to calculate
      something multiple times, each parameter will flag the intermediate
      parameters it requires in these arrays. */
-  oiflag = p->oiflag = gal_data_malloc_array(GAL_TYPE_UINT8, OCOL_NUMCOLS,
+  oiflag = p->oiflag = gal_data_calloc_array(GAL_TYPE_UINT8, OCOL_NUMCOLS,
                                              __func__, "oiflag");
-  ciflag = p->ciflag = gal_data_malloc_array(GAL_TYPE_UINT8, CCOL_NUMCOLS,
+  ciflag = p->ciflag = gal_data_calloc_array(GAL_TYPE_UINT8, CCOL_NUMCOLS,
                                              __func__, "ciflag");
 
   /* Allocate the columns. */
@@ -965,6 +965,45 @@ columns_define_alloc(struct mkcatalogparams *p)
           p->hasmag      = 1;   /* Doesn't need per-pixel calculations. */
           break;
 
+        case UI_KEY_UPPERLIMITONESIGMA:
+          name           = "UPPERLIMIT_ONE_SIGMA";
+          unit           = p->input->unit;
+          ocomment       = "One sigma value of all random measurements.";
+          ccomment       = ocomment;
+          otype          = GAL_TYPE_FLOAT32;
+          ctype          = GAL_TYPE_FLOAT32;
+          disp_fmt       = GAL_TABLE_DISPLAY_FMT_GENERAL;
+          disp_width     = 8;
+          disp_precision = 3;
+          p->upperlimit  = 1;
+          break;
+
+        case UI_KEY_UPPERLIMITSIGMA:
+          name           = "UPPERLIMIT_SIGMA";
+          unit           = "frac";
+          ocomment       = "Place in upperlimit distribution (sigma multiple).";
+          ccomment       = ocomment;
+          otype          = GAL_TYPE_FLOAT32;
+          ctype          = GAL_TYPE_FLOAT32;
+          disp_fmt       = GAL_TABLE_DISPLAY_FMT_GENERAL;
+          disp_width     = 8;
+          disp_precision = 3;
+          p->upperlimit  = 1;
+          break;
+
+        case UI_KEY_UPPERLIMITQUANTILE:
+          name           = "UPPERLIMIT_QUANTILE";
+          unit           = "quantile";
+          ocomment       = "Quantile of brightness in random distribution.";
+          ccomment       = ocomment;
+          otype          = GAL_TYPE_FLOAT32;
+          ctype          = GAL_TYPE_FLOAT32;
+          disp_fmt       = GAL_TABLE_DISPLAY_FMT_GENERAL;
+          disp_width     = 8;
+          disp_precision = 3;
+          p->upperlimit  = 1;
+          break;
+
         case UI_KEY_RIVERAVE:
           name           = "RIVER_AVE";
           unit           = p->input->unit ? p->input->unit : "pixelunit";
@@ -1422,6 +1461,27 @@ columns_second_order(struct mkcatalog_passparams *pp, double *row,
 
 
 
+/* The clump brightness is needed in several places, so we've defined this
+   function to keep the code simpler. */
+static double
+columns_clump_brightness(double *ci)
+{
+  double tmp;
+  /* Calculate the river flux over the clump area. But only when rivers are
+     present. When grown clumps are requested, the clumps can fully cover a
+     detection (that has one or no clumps). */
+  tmp = ( ci[ CCOL_RIV_NUM ]>0.0f
+          ? ci[ CCOL_RIV_SUM ]/ci[ CCOL_RIV_NUM ]*ci[ CCOL_NUM ]
+          : 0 );
+
+  /* Subtract it from the clump's brightness. */
+  return ci[ CCOL_NUM ]>0.0f ? (ci[ CCOL_SUM ] - tmp) : NAN;
+}
+
+
+
+
+
 /* The magnitude error is directly derivable from the S/N:
 
    To derive the error in measuring the magnitude from the S/N, let's take
@@ -1653,6 +1713,20 @@ columns_fill(struct mkcatalog_passparams *pp)
           ((float *)colarr)[oind] = MKC_MAG(oi[ OCOL_UPPERLIMIT_B ]);
           break;
 
+        case UI_KEY_UPPERLIMITONESIGMA:
+          ((float *)colarr)[oind] = oi[ OCOL_UPPERLIMIT_S ];
+          break;
+
+        case UI_KEY_UPPERLIMITSIGMA:
+          ((float *)colarr)[oind] = ( ( oi[ OCOL_NUM ]>0.0f
+                                        ? oi[ OCOL_SUM ] : NAN )
+                                      / oi[ OCOL_UPPERLIMIT_S ] );
+          break;
+
+        case UI_KEY_UPPERLIMITQUANTILE:
+          ((float *)colarr)[oind] = oi[ OCOL_UPPERLIMIT_Q ];
+          break;
+
         case UI_KEY_SN:
           ((float *)colarr)[oind] = columns_sn(p, oi, 0);
           break;
@@ -1790,17 +1864,7 @@ columns_fill(struct mkcatalog_passparams *pp)
             break;
 
           case UI_KEY_BRIGHTNESS:
-            /* Calculate the river flux over the clump area. But only when
-               rivers are present. When grown clumps are requested, the
-               clumps can fully cover a detection (that has one or no
-               clumps). */
-            tmp = ( ci[ CCOL_RIV_NUM ]>0.0f
-                    ? ci[ CCOL_RIV_SUM ]/ci[ CCOL_RIV_NUM ]*ci[ CCOL_NUM ]
-                    : 0 );
-
-            /* Subtract it from the clump's brightness. */
-            ((float *)colarr)[cind] = ( ci[ CCOL_NUM ]>0.0f
-                                        ? (ci[ CCOL_SUM ] - tmp) : NAN );
+            ((float *)colarr)[cind] = columns_clump_brightness(ci);
             break;
 
           case UI_KEY_NORIVERBRIGHTNESS:
@@ -1809,16 +1873,8 @@ columns_fill(struct mkcatalog_passparams *pp)
             break;
 
           case UI_KEY_MEAN:
-            /* Similar to brightness. */
-            tmp = ( ci[ CCOL_RIV_NUM ]>0.0f
-                    ? ci[ CCOL_RIV_SUM ]/ci[ CCOL_RIV_NUM ]
-                    : 0 );
-
-            /* Subtract it from the clump's mean. */
-            ((float *)colarr)[cind] = ( ci[ CCOL_NUM ]>0.0f
-                                        ? (ci[CCOL_SUM]/ci[CCOL_NUM] - tmp)
-                                        : NAN );
-
+            ((float *)colarr)[cind] = ( columns_clump_brightness(ci)
+                                        /ci[CCOL_NUM] );
             break;
 
           case UI_KEY_MEDIAN:
@@ -1845,6 +1901,19 @@ columns_fill(struct mkcatalog_passparams *pp)
             ((float *)colarr)[cind] = MKC_MAG(ci[ CCOL_UPPERLIMIT_B ]);
             break;
 
+          case UI_KEY_UPPERLIMITONESIGMA:
+            ((float *)colarr)[cind] = ci[ CCOL_UPPERLIMIT_S ];
+            break;
+
+          case UI_KEY_UPPERLIMITSIGMA:
+            ((float *)colarr)[cind] = ( columns_clump_brightness(ci)
+                                        / ci[ CCOL_UPPERLIMIT_S ] );
+            break;
+
+          case UI_KEY_UPPERLIMITQUANTILE:
+            ((float *)colarr)[cind] = ci[ CCOL_UPPERLIMIT_Q ];
+            break;
+
           case UI_KEY_RIVERAVE:
             ((float *)colarr)[cind] = ( ci[ CCOL_RIV_NUM]
                                         ? ci[ CCOL_RIV_SUM ]/ci[ CCOL_RIV_NUM]
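
The river subtraction factored into `columns_clump_brightness' above can be
seen in isolation in this standalone sketch (illustrative values only, not
Gnuastro code): the average river value around the clump, scaled by the
clump's area, is removed from the raw sum, and the clump's mean is that
corrected brightness divided by the area.

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
      double sum=120.0;     /* raw sum of pixel values in the clump      */
      double num=40.0;      /* number of pixels in the clump             */
      double riv_sum=15.0;  /* sum of the river pixels around the clump  */
      double riv_num=10.0;  /* number of river pixels                    */
      double river, brightness, mean;

      /* Average river value times the clump area (zero if no rivers). */
      river = riv_num>0.0 ? riv_sum/riv_num*num : 0.0;

      /* River-subtracted brightness, and the clump's mean pixel value. */
      brightness = num>0.0 ? sum-river : NAN;
      mean       = brightness/num;

      printf("brightness=%g, mean=%g\n", brightness, mean);
      return 0;
    }
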
diff --git a/bin/mkcatalog/main.h b/bin/mkcatalog/main.h
index 7b5f0c2..632dd64 100644
--- a/bin/mkcatalog/main.h
+++ b/bin/mkcatalog/main.h
@@ -89,7 +89,9 @@ enum objectcols
     OCOL_GXX,            /* Second order geometric variable: X*X.     */
     OCOL_GYY,            /* Second order geometric variable: Y*Y.     */
     OCOL_GXY,            /* Second order geometric variable: X*Y.     */
-    OCOL_UPPERLIMIT_B,   /* Upper limit magnitude.                    */
+    OCOL_UPPERLIMIT_B,   /* Upper limit brightness.                   */
+    OCOL_UPPERLIMIT_S,   /* Upper limit one-sigma value.              */
+    OCOL_UPPERLIMIT_Q,   /* Quantile of object in random distribution.*/
     OCOL_C_NUMALL,       /* Value independent no. of pixels in clumps.*/
     OCOL_C_NUM,          /* Area of clumps in this object.            */
     OCOL_C_SUM,          /* Brightness in object clumps.              */
@@ -129,7 +131,9 @@ enum clumpcols
     CCOL_GXX,            /* Second order geometric moment.            */
     CCOL_GYY,            /* Second order geometric moment.            */
     CCOL_GXY,            /* Second order geometric moment.            */
-    CCOL_UPPERLIMIT_B,   /* Upper limit magnitude.                    */
+    CCOL_UPPERLIMIT_B,   /* Upper limit brightness.                   */
+    CCOL_UPPERLIMIT_S,   /* Upper limit one-sigma value.              */
+    CCOL_UPPERLIMIT_Q,   /* Quantile of object in random distribution.*/
 
     CCOL_NUMCOLS,        /* SHOULD BE LAST: total number of columns.  */
   };
diff --git a/bin/mkcatalog/mkcatalog.c b/bin/mkcatalog/mkcatalog.c
index 2fafb30..e09929c 100644
--- a/bin/mkcatalog/mkcatalog.c
+++ b/bin/mkcatalog/mkcatalog.c
@@ -624,7 +624,7 @@ mkcatalog_single_object(void *in_prm)
       mkcatalog_first_pass(&pp);
 
       /* Currently the second pass is only necessary when there is a clumps
-         image or the median is requested. */
+         image. */
       if(p->clumps)
         {
           /* Allocate space for the properties of each clump. */
diff --git a/bin/mkcatalog/mkcatalog.h b/bin/mkcatalog/mkcatalog.h
index 959b309..4e93c35 100644
--- a/bin/mkcatalog/mkcatalog.h
+++ b/bin/mkcatalog/mkcatalog.h
@@ -33,11 +33,11 @@ struct mkcatalog_passparams
   gal_data_t          *tile;    /* The tile to pass-over.               */
   float               *st_i;    /* Starting pointer for input image.    */
   int32_t             *st_o;    /* Starting pointer for objects image.  */
-  int32_t             *st_c;    /* Starting pointer for objects image.  */
+  int32_t             *st_c;    /* Starting pointer for clumps image.   */
   float             *st_sky;    /* Starting pointer for input image.    */
   float             *st_std;    /* Starting pointer for input image.    */
   size_t   start_end_inc[2];    /* Starting and ending indexs.          */
-  size_t             *shift;    /* Shift coordinates for coordinates.   */
+  size_t             *shift;    /* Shift coordinates.                   */
   gsl_rng              *rng;    /* Random number generator.             */
   size_t    clumpstartindex;    /* Clump starting row in final catalog. */
   gal_data_t       *up_vals;    /* Container for upper-limit values.    */
diff --git a/bin/mkcatalog/ui.h b/bin/mkcatalog/ui.h
index 7ecc919..1d8d28d 100644
--- a/bin/mkcatalog/ui.h
+++ b/bin/mkcatalog/ui.h
@@ -127,6 +127,9 @@ enum option_keys_enum
   UI_KEY_MEDIAN,
   UI_KEY_CLUMPSMAGNITUDE,
   UI_KEY_UPPERLIMIT,
+  UI_KEY_UPPERLIMITONESIGMA,
+  UI_KEY_UPPERLIMITSIGMA,
+  UI_KEY_UPPERLIMITQUANTILE,
   UI_KEY_RIVERAVE,
   UI_KEY_RIVERNUM,
   UI_KEY_SKY,
diff --git a/bin/mkcatalog/upperlimit.c b/bin/mkcatalog/upperlimit.c
index 58424ec..d7837b7 100644
--- a/bin/mkcatalog/upperlimit.c
+++ b/bin/mkcatalog/upperlimit.c
@@ -34,6 +34,8 @@ along with Gnuastro. If not, see <http://www.gnu.org/licenses/>.
 #include <gnuastro/statistics.h>
 
 #include "main.h"
+
+#include "ui.h"
 #include "mkcatalog.h"
 
 
@@ -45,7 +47,7 @@ static gal_data_t *
 upperlimit_make_clump_tiles(struct mkcatalog_passparams *pp)
 {
   gal_data_t *input=pp->p->input;
-  size_t ndim=input->ndim, *dsize=input->dsize;
+  size_t ndim=input->ndim, *tsize=pp->tile->dsize;
 
   int32_t *O, *C;
   gal_data_t *tiles=NULL;
@@ -68,7 +70,8 @@ upperlimit_make_clump_tiles(struct mkcatalog_passparams *pp)
         minmax[ i * width + ndim + d ] = 0;                /* Maximum. */
       }
 
-  /* Parse over the object and get the clump's minimum and maximum. */
+  /* Parse over the object and get the clump's minimum and maximum
+     positions.*/
   while( pp->start_end_inc[0] + increment <= pp->start_end_inc[1] )
     {
       /* Set the pointers for this tile. */
@@ -77,14 +80,15 @@ upperlimit_make_clump_tiles(struct mkcatalog_passparams *pp)
       C = pp->st_c + increment;
 
       /* Go over the contiguous region. */
-      II = I + dsize[ndim-1];
+      II = I + tsize[ndim-1];
       do
         {
           /* Only consider clumps. */
           if( *O==pp->object && *C>0 )
             {
               /* Get the coordinates of this pixel. */
-              gal_dimension_index_to_coord(I-start, ndim, dsize, coord);
+              gal_dimension_index_to_coord(I-start, ndim, input->dsize,
+                                           coord);
 
               /* Check to see if this coordinate is the smallest/largest
                  found so far for this label. Note that labels start from
@@ -104,7 +108,7 @@ upperlimit_make_clump_tiles(struct mkcatalog_passparams *pp)
       while(++I<II);
 
       /* Increment to the next contiguous region. */
-      increment += ( gal_tile_block_increment(input, dsize, num_increment++,
+      increment += ( gal_tile_block_increment(input, tsize, num_increment++,
                                               NULL) );
     }
 
@@ -261,17 +265,112 @@ upperlimit_random_position(struct mkcatalog_passparams *pp, gal_data_t *tile,
 
 
 
-static double
+/* Given the distribution of values, do the upper-limit calculations. */
+static void
+upperlimit_measure(struct mkcatalog_passparams *pp, int32_t clumplab,
+                   int do_measurement)
+{
+  float *scarr;
+  gal_data_t *column;
+  size_t init_size, col, one=1;
+  struct mkcatalogparams *p=pp->p;
+  gal_data_t *sum, *qfunc=NULL, *sigclip=NULL;
+  double *o = ( clumplab
+                ? &pp->ci[ (clumplab-1) * CCOL_NUMCOLS ]
+                : pp->oi );
+
+  /* If the random distribution exists, then fill it in. */
+  if(do_measurement)
+    {
+      /* These columns are for both objects and clumps, so if they are
+         requested in objects, they will also be written for clumps here
+         (the order is irrelevant here). */
+      for(column=p->objectcols; column!=NULL; column=column->next)
+        {
+          switch(column->status)
+            {
+            /* Columns that depend on the sigma of the distribution. */
+            case UI_KEY_UPPERLIMIT:
+            case UI_KEY_UPPERLIMITMAG:
+            case UI_KEY_UPPERLIMITSIGMA:
+            case UI_KEY_UPPERLIMITONESIGMA:
+
+              /* We only need to do this once. */
+              if(sigclip==NULL)
+                {
+                  /* Calculate the sigma-clipped standard deviation. Since
+                     it is done in place, the size will change, so we'll
+                     keep the size here and put it back after we are
+                     done. */
+                  init_size=pp->up_vals->size;
+                  sigclip=gal_statistics_sigma_clip(pp->up_vals,
+                                                    p->upsigmaclip[0],
+                                                    p->upsigmaclip[1], 1, 1);
+                  pp->up_vals->size=pp->up_vals->dsize[0]=init_size;
+                  scarr=sigclip->array;
+
+                  /* Write the raw sigma. */
+                  col = clumplab ? CCOL_UPPERLIMIT_S : OCOL_UPPERLIMIT_S;
+                  o[col] = scarr[3];
+
+                  /* Write the multiple of `upnsigma'. */
+                  col = clumplab ? CCOL_UPPERLIMIT_B : OCOL_UPPERLIMIT_B;
+                  o[col] = scarr[3] * p->upnsigma;
+
+                  /* Clean up. */
+                  gal_data_free(sigclip);
+                }
+              break;
+
+            /* Quantile column. */
+            case UI_KEY_UPPERLIMITQUANTILE:
+
+              /* Also only necessary once (if requested multiple times). */
+              if(qfunc==NULL)
+                {
+                  /* Similar to the case for sigma-clipping, we'll need to
+                     keep the size here also. */
+                  init_size=pp->up_vals->size;
+                  sum=gal_data_alloc(NULL, GAL_TYPE_FLOAT32, 1, &one, NULL, 0,
+                                     -1, NULL, NULL, NULL);
+                  ((float *)(sum->array))[0]=o[clumplab?CCOL_SUM:OCOL_SUM];
+                  qfunc=gal_statistics_quantile_function(pp->up_vals, sum, 1);
+
+                  /* Fill in the column. */
+                  col = clumplab ? CCOL_UPPERLIMIT_Q : OCOL_UPPERLIMIT_Q;
+                  pp->up_vals->size=pp->up_vals->dsize[0]=init_size;
+                  o[col] = ((double *)(qfunc->array))[0];
+
+                  /* Clean up. */
+                  gal_data_free(sum);
+                  gal_data_free(qfunc);
+                }
+              break;
+            }
+        }
+    }
+  else
+    {
+      o[ clumplab ? CCOL_UPPERLIMIT_S : OCOL_UPPERLIMIT_S ] = NAN;
+      o[ clumplab ? CCOL_UPPERLIMIT_B : OCOL_UPPERLIMIT_B ] = NAN;
+      o[ clumplab ? CCOL_UPPERLIMIT_Q : OCOL_UPPERLIMIT_Q ] = NAN;
+    }
+}
+
+
+
+
+
+static void
 upperlimit_one_tile(struct mkcatalog_passparams *pp, gal_data_t *tile,
                     unsigned long seed, int32_t clumplab)
 {
   struct mkcatalogparams *p=pp->p;
   size_t ndim=p->input->ndim, *dsize=p->input->dsize;
 
+  double sum;
   void *tarray;
-  double sum, out;
   int continueparse;
-  gal_data_t *sigclip;
   uint8_t *M=NULL, *st_m=NULL;
   float *uparr=pp->up_vals->array;
   float *I, *II, *SK, *st_i, *st_sky;
@@ -373,19 +472,12 @@ upperlimit_one_tile(struct mkcatalog_passparams *pp, gal_data_t *tile,
       ++tcounter;
     }
 
-  /* Calculate the standard deviation of this distribution. */
-  if(counter==p->upnum)
-    {
-      sigclip=gal_statistics_sigma_clip(pp->up_vals, p->upsigmaclip[0],
-                                        p->upsigmaclip[1], 1, 1);
-      out = ((float *)(sigclip->array))[3] * p->upnsigma;
-    }
-  else out=NAN;
+  /* Do the measurement on the random distribution. */
+  upperlimit_measure(pp, clumplab, counter==p->upnum);
 
   /* Reset the tile's array pointer, clean up and return. */
   tile->array=tarray;
   free(rcoord);
-  return out;
 }
 
 
@@ -414,16 +506,14 @@ void
 upperlimit_calculate(struct mkcatalog_passparams *pp)
 {
   size_t i;
-  double *ci;
   unsigned long seed;
   gal_data_t *clumptiles;
   struct mkcatalogparams *p=pp->p;
 
   /* First find the upper limit magnitude for this object. */
-  pp->oi[OCOL_UPPERLIMIT_B] = upperlimit_one_tile(pp, pp->tile,
-                                                  p->seed+pp->object, 0);
+  upperlimit_one_tile(pp, pp->tile, p->seed+pp->object, 0);
 
-  /* If a clumps image is present (a clump catalog is requested( and this
+  /* If a clumps image is present (a clump catalog is requested) and this
      object has clumps, then find the upper limit magnitude for the clumps
      within this object. */
   if(p->clumps && pp->clumpsinobj)
@@ -438,10 +528,8 @@ upperlimit_calculate(struct mkcatalog_passparams *pp)
          IDs. */
       for(i=0;i<pp->clumpsinobj;++i)
         {
-          ci=&pp->ci[ i * CCOL_NUMCOLS ];
           seed = p->seed + p->numobjects + p->numclumps * pp->object + i;
-          ci[CCOL_UPPERLIMIT_B] = upperlimit_one_tile(pp, &clumptiles[i],
-                                                          seed, i+1);
+          upperlimit_one_tile(pp, &clumptiles[i], seed, i+1);
         }
 
       /* Clean up the clump tiles. */
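
The quantile measurement in `upperlimit_measure' above goes through
`gal_statistics_quantile_function'. The following standalone sketch (an
assumption about usage outside MakeCatalog, mirroring the calls shown in
this diff) wraps the value to look up in a single-element dataset of the
same type as the distribution and reads the result back as a double; per
the NEWS entry, a value outside the distribution's range comes back as
`inf' or `-inf'. Link with something like `-lgnuastro'.

    #include <stdio.h>
    #include <gnuastro/data.h>
    #include <gnuastro/statistics.h>

    int main(void)
    {
      float *arr;
      double *q;
      size_t i, num=100, one=1;
      gal_data_t *vals, *value, *qfunc;

      /* A toy distribution: 0.0, 0.1, 0.2, ..., 9.9. */
      vals=gal_data_alloc(NULL, GAL_TYPE_FLOAT32, 1, &num, NULL, 0, -1,
                          NULL, NULL, NULL);
      arr=vals->array;
      for(i=0;i<num;++i) arr[i]=0.1f*i;

      /* Where does the value 3.0 sit within that distribution? */
      value=gal_data_alloc(NULL, GAL_TYPE_FLOAT32, 1, &one, NULL, 0, -1,
                           NULL, NULL, NULL);
      ((float *)(value->array))[0]=3.0f;
      qfunc=gal_statistics_quantile_function(vals, value, 0);
      q=qfunc->array;
      printf("quantile of 3.0: %g\n", q[0]);

      /* Clean up. */
      gal_data_free(qfunc);
      gal_data_free(value);
      gal_data_free(vals);
      return 0;
    }
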
diff --git a/bin/noisechisel/clumps.c b/bin/noisechisel/clumps.c
index 66de8cc..cd2ce59 100644
--- a/bin/noisechisel/clumps.c
+++ b/bin/noisechisel/clumps.c
@@ -696,7 +696,7 @@ enum infocols
     INFO_X,              /* Flux weighted X center col, 0 by C std. */
     INFO_Y,              /* Flux weighted Y center col.             */
     INFO_Z,              /* Flux weighted Z center col.             */
-    INFO_NFF,            /* Number of non-negative pixels (for X,Y).*/
+    INFO_SFF,            /* Sum of non-negative pixels (for X,Y).   */
     INFO_INFLUX,         /* Tatal flux within clump.                */
     INFO_INAREA,         /* Tatal area within clump.                */
     INFO_RIVFLUX,        /* Tatal flux within rivers around clump.  */
@@ -735,7 +735,7 @@ clumps_get_raw_info(struct clumps_thread_params *cltprm)
             if( arr[*a]>0.0f )
               {
                 gal_dimension_index_to_coord(*a, ndim, dsize, coord);
-                info[ lab * INFO_NCOLS + INFO_NFF ] += arr[*a];
+                info[ lab * INFO_NCOLS + INFO_SFF ] += arr[*a];
                 info[ lab * INFO_NCOLS + INFO_X   ] += arr[*a] * coord[0];
                 info[ lab * INFO_NCOLS + INFO_Y   ] += arr[*a] * coord[1];
                 if(ndim==3)
@@ -797,14 +797,14 @@ clumps_get_raw_info(struct clumps_thread_params *cltprm)
           /* Especially over the undetected regions, it might happen that
              none of the pixels were positive. In that case, set the total
              area of the clump to zero so it is no longer considered.*/
-          if( row[INFO_NFF]==0.0f ) row[INFO_INAREA]=0;
+          if( row[INFO_SFF]==0.0f ) row[INFO_INAREA]=0;
           else
             {
               /* Find the coordinates of the clump's weighted center. */
-              coord[0]=GAL_DIMENSION_FLT_TO_INT(row[INFO_X]/row[INFO_NFF]);
-              coord[1]=GAL_DIMENSION_FLT_TO_INT(row[INFO_Y]/row[INFO_NFF]);
+              coord[0]=GAL_DIMENSION_FLT_TO_INT(row[INFO_X]/row[INFO_SFF]);
+              coord[1]=GAL_DIMENSION_FLT_TO_INT(row[INFO_Y]/row[INFO_SFF]);
               if(ndim==3)
-                coord[2]=GAL_DIMENSION_FLT_TO_INT(row[INFO_Z]/row[INFO_NFF]);
+                coord[2]=GAL_DIMENSION_FLT_TO_INT(row[INFO_Z]/row[INFO_SFF]);
 
               /* Find the corresponding standard deviation. */
               fullid=gal_tile_full_id_from_coord(&p->cp.tl, coord);
@@ -812,8 +812,8 @@ clumps_get_raw_info(struct clumps_thread_params *cltprm)
 
               /* For a check
               printf("---------\n");
-              printf("\t%f --> %zu\n", row[INFO_Y]/row[INFO_NFF], coord[1]);
-              printf("\t%f --> %zu\n", row[INFO_X]/row[INFO_NFF], coord[0]);
+              printf("\t%f --> %zu\n", row[INFO_Y]/row[INFO_SFF], coord[1]);
+              printf("\t%f --> %zu\n", row[INFO_X]/row[INFO_SFF], coord[0]);
               printf("%u: (%zu, %zu): %.3f\n", lab, coord[1]+1,
                      coord[0]+1, row[INFO_INSTD]);
               */
@@ -821,6 +821,7 @@ clumps_get_raw_info(struct clumps_thread_params *cltprm)
         }
     }
 
+
   /* Clean up. */
   free(dinc);
   free(ngblabs);
@@ -842,7 +843,6 @@ clumps_make_sn_table(struct clumps_thread_params *cltprm)
   int sky0_det1=cltprm->clprm->sky0_det1;
   size_t i, ind, counter=0, infodsize[2]={tablen, INFO_NCOLS};
 
-
   /* If there were no initial clumps, then ignore this function. */
   if(cltprm->numinitclumps==0) { cltprm->snind=cltprm->sn=NULL; return; }
 
@@ -1298,8 +1298,8 @@ clumps_true_find_sn_thresh(struct noisechiselparams *p)
           /* Set the extension name. */
           switch(clprm.step)
             {
-            case 1: p->clabel->name = "SKY_CLUMPS_ALL";  break;
-            case 2: p->clabel->name = "SKY_CLUMPS_FOR_SN";   break;
+            case 1: p->clabel->name = "SKY_CLUMPS_ALL";    break;
+            case 2: p->clabel->name = "SKY_CLUMPS_FOR_SN"; break;
             default:
               error(EXIT_FAILURE, 0, "%s: a bug! Please contact us at %s so "
                     "we can address the issue. The value %d is not valid for "
@@ -1443,20 +1443,17 @@ clumps_det_label_indexs(struct noisechiselparams *p)
   int32_t *a, *l, *lf;
   gal_data_t *labindexs=gal_data_array_calloc(p->numdetections+1);
 
-  /* Find the area in each detected objects (to see how much space we need
-     to allocate). */
+  /* Find the area in each detected object (to see how much space we need
+     to allocate). Blank pixels have a negative label and undetected
+     pixels have a label of zero, so a single greater-than-zero check
+     covers both cases. */
   areas=gal_data_calloc_array(GAL_TYPE_SIZE_T, p->numdetections+1, __func__,
                               "areas");
-  if(gal_blank_present(p->input, 1))
-    {
-      lf=(l=p->olabel->array)+p->olabel->size; /* Blank pixels have a      */
-      do if(*l>0) ++areas[*l]; while(++l<lf);  /* negative value in int32. */
-    }
-  else
-    {
-      lf=(l=p->olabel->array)+p->olabel->size; do ++areas[*l]; while(++l<lf);
-      areas[0]=0;
-    }
+  lf=(l=p->olabel->array)+p->olabel->size;
+  do
+    if(*l>0)  /* Only labeled regions: *l==0 (undetected), *l<0 (blank). */
+      ++areas[*l];
+  while(++l<lf);
 
   /* For a check.
   for(i=0;i<p->numdetections+1;++i)
diff --git a/bin/noisechisel/detection.c b/bin/noisechisel/detection.c
index 2b24e2d..d924715 100644
--- a/bin/noisechisel/detection.c
+++ b/bin/noisechisel/detection.c
@@ -901,19 +901,19 @@ detection_remove_false_initial(struct noisechiselparams *p,
 {
   size_t i;
   uint8_t *b=workbin->array;
+  float *e_th, *arr=p->conv->array;
   int32_t *l=p->olabel->array, *lf=l+p->olabel->size, curlab=1;
   int32_t *newlabels=gal_data_calloc_array(GAL_TYPE_UINT32,
                                            p->numinitialdets+1, __func__,
                                            "newlabels");
 
-
   /* Find the new labels for all the existing labels. Recall that
      `newlabels' was initialized to zero, so any label that is not given a
      new label here will be automatically removed. After the first pixel of
      a label overlaps with dbyt[i], we don't need to check the rest of that
      object's pixels. At this point, tokeep is only binary: 0 or 1.
 
-     Note that the zeroth element of tokeep can also be non zero, this is
+     Note that the zeroth element of `tokeep' can also be non zero, this is
      because the holes of the labeled regions might be filled during
      filling the holes, but have not been filled in the original labeled
      array. They are not important so you can just ignore them. */
@@ -937,27 +937,59 @@ detection_remove_false_initial(struct noisechiselparams *p,
   for(i=0;i<p->numinitialdets;++i) if(newlabels[i]) newlabels[i] = curlab++;
 
 
-  /* Replace the byt and olab values with their proper values. If the user
-     doesn't want to grow, then change the labels in `lab' too. Otherwise,
-     you don't have to worry about the label array. After dilation a new
-     labeling will be done and the whole labeled array will be re-filled.*/
-  b=workbin->array; l=p->olabel->array;
-  if(p->detgrowquant==1.0f)          /* We need the binary array even when */
-    do                               /* there is no growth: the binary     */
-      {                              /* array is used for estimating the   */
-        if(*l!=GAL_BLANK_INT32)      /* Sky and its STD. */
-          *b = ( *l = newlabels[ *l ] ) > 0;
-        ++b;
-      }
-    while(++l<lf);
-  else
+  /* Replace the byt and olab values with their proper values. Note that we
+     need the binary array when there is no growth also. The binary array
+     is later used in estimating the sky and its STD.
+
+     See if growth is necessary or not. Note that even if the user has
+     asked for growth, if the edges of the objects in the image are sharp
+     enough, no growth will be necessary (and thus the labeled image won't
+     be re-written during growth). So it is necessary to check for growth
+     here and later do it in `detection_quantile_expand'. */
+  p->numexpand=0;
+  b=workbin->array;
+  l=p->olabel->array;
+  if(p->detgrowquant==1.0)
     do
-      {
-        if(*l!=GAL_BLANK_INT32)
-          *b = newlabels[ *l ] > 0;
+      {                                       /* No growth will happen.  */
+        if(*l!=GAL_BLANK_INT32)               /* So set both the binary  */
+          *b = ( *l = newlabels[ *l ] ) > 0;  /* AND label images.       */
         ++b;
       }
     while(++l<lf);
+  else
+    {
+      /* Expand the threshold values (from one value for each tile) to the
+         whole dataset. Since we know the tiles cover the whole image, we
+         don't need to initialize or check for blanks. */
+      p->exp_thresh_full=gal_tile_block_write_const_value(p->expand_thresh,
+                                                          p->cp.tl.tiles, 0,
+                                                          0);
+
+      /* Remove the false detections and count how many pixels need to
+         grow. */
+      e_th=p->exp_thresh_full->array;
+      do                                    /* Growth is necessary later.  */
+        {                                   /* So there is no need to set  */
+          if(*l!=GAL_BLANK_INT32)           /* the labels image, but we    */
+            {                               /* have to count the number of */
+              *b = newlabels[ *l ] > 0;     /* pixels to (possibly) grow.  */
+              if( *b==0 && *arr>*e_th )
+                ++p->numexpand;
+            }
+          ++b; ++arr; ++e_th;
+        }
+      while(++l<lf);
+
+      /* If there aren't any pixels to later expand, then reset the labels
+         (remove false detections in the labeled image). */
+      if(p->numexpand==0)
+        {
+          b=workbin->array;
+          l=p->olabel->array;
+          do if(*l!=GAL_BLANK_INT32) *l = newlabels[ *l ]; while(++l<lf);
+        }
+    }
 
 
   /* Clean up and return. */
@@ -969,106 +1001,92 @@ detection_remove_false_initial(struct noisechiselparams *p,
 
 
 
+/* Expand the initial detections based on the quantile threshold and then
+   label the connected regions. If expansion is not possible, then return
+   `GAL_BLANK_SIZE_T'. */
 static size_t
 detection_quantile_expand(struct noisechiselparams *p, gal_data_t *workbin)
 {
   int32_t *o, *of;
-  size_t *d, counter=0, numexpanded;
+  size_t *d, numexpanded=0;
+  gal_data_t *diffuseindexs;
   float *i, *e_th, *arr=p->conv->array;
-  gal_data_t *exp_thresh_full, *diffuseindexs;
   uint8_t *b=workbin->array, *bf=b+workbin->size;
 
-  /* Expand the threshold values (from one value for each tile) to the
-     whole dataset. Since we know the tiles cover the whole image, we don't
-     neeed to initialize or check for blanks.*/
-  exp_thresh_full=gal_tile_block_write_const_value(p->expand_thresh,
-                                                   p->cp.tl.tiles, 0, 0);
-
-
-  /* Count the pixels that must be expanded. */
-  e_th=exp_thresh_full->array;
-  do { if(*b++==0 && *arr>*e_th) ++counter; ++arr; ++e_th; } while(b<bf);
-
-  /* Allocate the space necessary to keep the index of all the pixels that
-     must be expanded and re-initialize the necessary pointers. */
-  diffuseindexs=gal_data_alloc(NULL, GAL_TYPE_SIZE_T, 1, &counter, NULL, 0,
-                               p->cp.minmapsize, NULL, NULL, NULL);
-
-  /* Fill in the diffuse indexs and initialize the objects dataset. */
-  b=workbin->array;
-  arr=p->conv->array;
-  d=diffuseindexs->array;
-  e_th=exp_thresh_full->array;
-  of=(o=p->olabel->array)+p->olabel->size;
-  do
+  /* Only continue if there actually are any pixels to expand (this can
+     happen!). */
+  if(p->numexpand)
     {
-      /* If the binary value is 1, then we want an initial label of 1 (the
-         object is already detected). If it isn't, then we only want it if
-         it is above the threshold. */
-      *o = *b==1 ? 1 : ( *arr>*e_th ? CLUMPS_INIT : 0);
-      if(*b==0 && *arr>*e_th)
-        *d++ = o - (int32_t *)(p->olabel->array);
-
-      /* Increment the points and go onto the next pixel. */
-      ++b;
-      ++arr;
-      ++e_th;
-    }
-  while(++o<of);
-
-  /* Expand the detections. */
-  clumps_grow(p->olabel, diffuseindexs, 0, p->olabel->ndim);
+      /* Allocate the space necessary to keep the index of all the pixels
+         that must be expanded and re-initialize the necessary pointers. */
+      diffuseindexs=gal_data_alloc(NULL, GAL_TYPE_SIZE_T, 1, &p->numexpand,
+                                   NULL, 0, p->cp.minmapsize, NULL, NULL,
+                                   NULL);
 
+      /* Fill in the diffuse indexs and initialize the objects dataset. */
+      b=workbin->array;
+      arr=p->conv->array;
+      d=diffuseindexs->array;
+      e_th=p->exp_thresh_full->array;
+      of=(o=p->olabel->array)+p->olabel->size;
+      do
+        {
+          /* If the binary value is 1, then we want an initial label of 1
+             (the object is already detected). If it isn't, then we only
+             want it if it is above the threshold. */
+          *o = *b==1 ? 1 : ( *arr>*e_th ? CLUMPS_INIT : 0);
+          if(*b==0 && *arr>*e_th)
+            *d++ = o - (int32_t *)(p->olabel->array);
+
+          /* Increment the points and go onto the next pixel. */
+          ++b;
+          ++arr;
+          ++e_th;
+        }
+      while(++o<of);
 
-  /* Only keep the 1 valued pixels in the binary array. */
-  o=p->olabel->array;
-  bf=(b=workbin->array)+workbin->size;
-  do *b = (*o++ == 1); while(++b<bf);
-  workbin=gal_binary_dilate(workbin, 1, 1, 1);
-  if(p->detectionname)
-    {
-      workbin->name="TRUE-DETECTIONS-GROWN";
-      gal_fits_img_write(workbin, p->detectionname, NULL, PROGRAM_NAME);
-      workbin->name=NULL;
-    }
+      /* Expand the detections. */
+      clumps_grow(p->olabel, diffuseindexs, 0, p->olabel->ndim);
 
-  /* Fill the holes */
-  gal_binary_fill_holes(workbin, 1, p->detgrowmaxholesize);
-  if(p->detectionname)
-    {
-      workbin->name="TRUE-DETECTIONS-HOLES-FILLED";
-      gal_fits_img_write(workbin, p->detectionname, NULL, PROGRAM_NAME);
-      workbin->name=NULL;
-    }
+      /* Only keep the 1 valued pixels in the binary array and fill its
+         holes. */
+      o=p->olabel->array;
+      bf=(b=workbin->array)+workbin->size;
+      do *b = (*o++ == 1); while(++b<bf);
+      workbin=gal_binary_dilate(workbin, 1, 1, 1);
+      gal_binary_fill_holes(workbin, 1, p->detgrowmaxholesize);
 
-  /* Get the labeled image. */
-  numexpanded=gal_binary_connected_components(workbin, &p->olabel,
-                                              workbin->ndim);
+      /* Get the labeled image. */
+      numexpanded=gal_binary_connected_components(workbin, &p->olabel,
+                                                  workbin->ndim);
 
-  /* Set all the input's blank pixels to blank in the labeled and binary
-     arrays. */
-  if( gal_blank_present(p->input, 1) )
-    {
-      b=workbin->array;
-      i=p->input->array;
-      of=(o=p->olabel->array)+p->olabel->size;
-      do
+      /* Set all the input's blank pixels to blank in the labeled and
+         binary arrays. */
+      if( gal_blank_present(p->input, 1) )
         {
-          if(isnan(*i++))
+          b=workbin->array;
+          i=p->input->array;
+          of=(o=p->olabel->array)+p->olabel->size;
+          do
             {
-              *o=GAL_BLANK_INT32;
-              *b=GAL_BLANK_UINT8;
+              if(isnan(*i++))
+                {
+                  *o=GAL_BLANK_INT32;
+                  *b=GAL_BLANK_UINT8;
+                }
+              ++b;
             }
-          ++b;
+          while(++o<of);
         }
-      while(++o<of);
+
+      /* Clean up. */
+      gal_data_free(diffuseindexs);
     }
 
   /* Clean up and return */
   gal_data_free(p->expand_thresh);
-  gal_data_free(exp_thresh_full);
-  gal_data_free(diffuseindexs);
-  return numexpanded;
+  gal_data_free(p->exp_thresh_full);
+  return numexpanded ? numexpanded : GAL_BLANK_SIZE_T;
 }
 
 
@@ -1083,7 +1101,7 @@ detection(struct noisechiselparams *p)
   char *msg;
   gal_data_t *workbin;
   struct timeval t0, t1;
-  size_t num_true_initial;
+  size_t num_true_initial, num_expanded;
 
   /* Report for the user. */
   if(!p->cp.quiet)
@@ -1136,7 +1154,14 @@ detection(struct noisechiselparams *p)
      final number of detections. */
   if(!p->cp.quiet) gettimeofday(&t1, NULL);
   if(p->detgrowquant!=1.0f)
-    num_true_initial=detection_quantile_expand(p, workbin);
+    {
+      num_expanded=detection_quantile_expand(p, workbin);
+      if(num_expanded!=GAL_BLANK_SIZE_T)
+        num_true_initial=num_expanded;
+    }
+
+
+  /* Update the user on the progress, if necessary. */
   if(!p->cp.quiet)
     {
       if(p->detgrowquant==1.0f)
@@ -1147,9 +1172,18 @@ detection(struct noisechiselparams *p)
         }
       else
         {
-          if( asprintf(&msg, "%zu detections after growth to %.3f quantile.",
-                       num_true_initial, p->detgrowquant)<0 )
-            error(EXIT_FAILURE, 0, "%s: asprintf allocation", __func__);
+          if(num_expanded==GAL_BLANK_SIZE_T)
+            {
+              if(asprintf(&msg, "%zu detections (no growth to %.3f "
+                          "quantile).", num_true_initial, p->detgrowquant)<0)
+                error(EXIT_FAILURE, 0, "%s: asprintf allocation", __func__);
+            }
+          else
+            {
+              if( asprintf(&msg, "%zu detections after growth to %.3f "
+                           "quantile.", num_true_initial, p->detgrowquant)<0 )
+                error(EXIT_FAILURE, 0, "%s: asprintf allocation", __func__);
+            }
         }
       gal_timing_report(&t1, msg, 2);
       free(msg);
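
The restructuring above moves the counting of to-be-expanded pixels into
`detection_remove_false_initial' so that `detection_quantile_expand' can
allocate `diffuseindexs' to exactly the right size. The general
count-then-fill pattern, in a standalone sketch with made-up values (not
Gnuastro code):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
      float image[8]={0.1f, 0.9f, 0.3f, 1.2f, 0.05f, 2.0f, 0.4f, 0.8f};
      float thresh=0.5f;
      size_t i, n=8, count=0, *indexs, *d;

      /* Pass 1: count the pixels above the threshold. */
      for(i=0;i<n;++i) if(image[i]>thresh) ++count;

      /* Allocate exactly `count' elements (no growing/re-allocation). */
      indexs=malloc(count*sizeof *indexs);
      if(indexs==NULL) return 1;

      /* Pass 2: store the flat indexes of those pixels. */
      d=indexs;
      for(i=0;i<n;++i) if(image[i]>thresh) *d++=i;

      for(i=0;i<count;++i) printf("%zu ", indexs[i]);
      printf("\n");

      free(indexs);
      return 0;
    }
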
diff --git a/bin/noisechisel/main.h b/bin/noisechisel/main.h
index 003cacc..ed78ef1 100644
--- a/bin/noisechisel/main.h
+++ b/bin/noisechisel/main.h
@@ -37,8 +37,6 @@ along with Gnuastro. If not, see <http://www.gnu.org/licenses/>.
 
 
 
-
-
 /* Main program parameters structure */
 struct noisechiselparams
 {
@@ -113,12 +111,14 @@ struct noisechiselparams
   gal_data_t          *olabel;  /* Labels of objects in the detection.    */
   gal_data_t          *clabel;  /* Labels of clumps in the detection.     */
   gal_data_t   *expand_thresh;  /* Quantile threshold to expand per tile. */
+  gal_data_t *exp_thresh_full;  /* Full array containing growth thresh.   */
   gal_data_t             *sky;  /* Mean of undetected pixels, per tile.   */
   gal_data_t             *std;  /* STD of undetected pixels, per tile.    */
   size_t           maxtcontig;  /* Maximum contiguous space for a tile.   */
   size_t          maxltcontig;  /* Maximum contiguous space for a tile.   */
   size_t            *maxtsize;  /* Maximum size of a single small tile.   */
   size_t           *maxltsize;  /* Maximum size of a single large tile.   */
+  size_t            numexpand;  /* Initial number of pixels to expand.    */
   time_t              rawtime;  /* Starting time of the program.          */
 
   float                medstd;  /* Median STD before interpolation.       */
diff --git a/bin/noisechisel/noisechisel.c b/bin/noisechisel/noisechisel.c
index 2a67232..a9e4f60 100644
--- a/bin/noisechisel/noisechisel.c
+++ b/bin/noisechisel/noisechisel.c
@@ -61,33 +61,43 @@ noisechisel_convolve(struct noisechiselparams *p)
   /* Convovle with sharper kernel. */
   if(p->conv==NULL)
     {
-      /* Make the convolved image. */
-      if(!p->cp.quiet) gettimeofday(&t1, NULL);
-      p->conv = gal_convolve_spatial(tl->tiles, p->kernel, p->cp.numthreads,
-                                     1, tl->workoverch);
-
-      /* Report and write check images if necessary. */
-      if(!p->cp.quiet)
+      /* Do the convolution if a kernel was requested. */
+      if(p->kernel)
         {
-          if(p->widekernel)
-            gal_timing_report(&t1, "Convolved with sharper kernel.", 1);
-          else
-            gal_timing_report(&t1, "Convolved with given kernel.", 1);
+          /* Make the convolved image. */
+          if(!p->cp.quiet) gettimeofday(&t1, NULL);
+          p->conv = gal_convolve_spatial(tl->tiles, p->kernel,
+                                         p->cp.numthreads, 1, tl->workoverch);
+
+          /* Report and write check images if necessary. */
+          if(!p->cp.quiet)
+            {
+              if(p->widekernel)
+                gal_timing_report(&t1, "Convolved with sharper kernel.", 1);
+              else
+                gal_timing_report(&t1, "Convolved with given kernel.", 1);
+            }
         }
+      else
+        p->conv=p->input;
     }
 
   /* Set a fixed name for the convolved image (since it will be used in
      many check images). */
-  if(p->conv->name) free(p->conv->name);
-  gal_checkset_allocate_copy( ( p->widekernel
-                                ? "CONVOLVED-SHARPER"
-                                : "CONVOLVED" ), &p->conv->name);
+  if(p->conv!=p->input)
+    {
+      if(p->conv->name) free(p->conv->name);
+      gal_checkset_allocate_copy( ( p->widekernel
+                                    ? "CONVOLVED-SHARPER"
+                                    : "CONVOLVED" ), &p->conv->name);
+    }
 
   /* Save the convolution step if necessary. */
   if(p->detectionname)
     {
       gal_fits_img_write(p->input, p->detectionname, NULL, PROGRAM_NAME);
-      gal_fits_img_write(p->conv, p->detectionname, NULL, PROGRAM_NAME);
+      if(p->input!=p->conv)
+        gal_fits_img_write(p->conv, p->detectionname, NULL, PROGRAM_NAME);
     }
 
   /* Convolve with wider kernel (if requested). */
diff --git a/bin/noisechisel/segmentation.c b/bin/noisechisel/segmentation.c
index 3111a5b..c54453b 100644
--- a/bin/noisechisel/segmentation.c
+++ b/bin/noisechisel/segmentation.c
@@ -875,7 +875,8 @@ segmentation(struct noisechiselparams *p)
   if(p->segmentationname)
     {
       gal_fits_img_write(p->input, p->segmentationname, NULL, PROGRAM_NAME);
-      gal_fits_img_write(p->conv, p->segmentationname, NULL, PROGRAM_NAME);
+      if(p->input!=p->conv)
+        gal_fits_img_write(p->conv, p->segmentationname, NULL, PROGRAM_NAME);
       p->olabel->name="DETECTION_LABELS";
       gal_fits_img_write(p->olabel, p->segmentationname, NULL,
                          PROGRAM_NAME);
diff --git a/bin/noisechisel/sky.c b/bin/noisechisel/sky.c
index b9beee8..b6e3ab5 100644
--- a/bin/noisechisel/sky.c
+++ b/bin/noisechisel/sky.c
@@ -258,25 +258,28 @@ sky_subtract(struct noisechiselparams *p)
       /* First subtract the Sky value from the input image. */
       GAL_TILE_PARSE_OPERATE(tile, NULL, 0, 0, {*i-=sky[tid];});
 
-      /* Change to the convolved image. */
-      tarray=tile->array;
-      tblock=tile->block;
-      tile->array=gal_tile_block_relative_to_other(tile, p->conv);
-      tile->block=p->conv;
-
-      /* The threshold is always low. So for the majority of non-NaN
-         pixels in the image, the condition above will be true. If we
-         come over a NaN pixel, then by definition of NaN, all
-         conditionals will fail.
-
-         If an image doesn't have any NaN pixels, only the pixels below
-         the threshold have to be checked for a NaN which are by
-         definition a very small fraction of the total pixels. And if
-         there are NaN pixels in the image. */
-      GAL_TILE_PARSE_OPERATE(tile, NULL, 0, 0, {*i-=sky[tid];});
-
-      /* Revert back to the original block. */
-      tile->array=tarray;
-      tile->block=tblock;
+      /* Change to the convolved image (if there is any). */
+      if(p->conv!=p->input)
+        {
+          tarray=tile->array;
+          tblock=tile->block;
+          tile->array=gal_tile_block_relative_to_other(tile, p->conv);
+          tile->block=p->conv;
+
+          /* The threshold is always low. So for the majority of non-NaN
+             pixels in the image, the condition above will be true. If we
+             come over a NaN pixel, then by definition of NaN, all
+             conditionals will fail.
+
+             If an image doesn't have any NaN pixels, only the pixels below
+             the threshold have to be checked for a NaN which are by
+             definition a very small fraction of the total pixels. And if
+             there are NaN pixels in the image. */
+          GAL_TILE_PARSE_OPERATE(tile, NULL, 0, 0, {*i-=sky[tid];});
+
+          /* Revert back to the original block. */
+          tile->array=tarray;
+          tile->block=tblock;
+        }
     }
 }
diff --git a/bin/noisechisel/ui.c b/bin/noisechisel/ui.c
index 455fc2c..38e49d9 100644
--- a/bin/noisechisel/ui.c
+++ b/bin/noisechisel/ui.c
@@ -259,7 +259,7 @@ ui_read_check_only_options(struct noisechiselparams *p)
           "    $ info gnuastro \"Input Output options\"");
 
   /* Kernel checks. */
-  if(p->kernelname)
+  if(p->kernelname && strcmp(p->kernelname, UI_NO_CONV_KERNEL_NAME))
     {
       /* Check if it exists. */
       gal_checkset_check_file(p->kernelname);
@@ -535,8 +535,13 @@ ui_prepare_kernel(struct noisechiselparams *p)
   /* If a kernel file is given, then use it. Otherwise, use the default
      kernel. */
   if(p->kernelname)
-    p->kernel=gal_fits_img_read_kernel(p->kernelname, p->khdu,
-                                       p->cp.minmapsize);
+    {
+      if( strcmp(p->kernelname, UI_NO_CONV_KERNEL_NAME) )
+        p->kernel=gal_fits_img_read_kernel(p->kernelname, p->khdu,
+                                           p->cp.minmapsize);
+      else
+        p->kernel=NULL;
+    }
   else
     {
       /* Allocate space for the kernel (we don't want to use the statically
@@ -845,9 +850,14 @@ ui_read_check_inputs_setup(int argc, char *argv[],
       else
         {
           if(p->kernelname)
-            printf("  - %s: %s (hdu: %s)\n",
-                   p->widekernelname ? "Sharp Kernel" : "Kernel",
-                   p->kernelname, p->khdu);
+            {
+              if( strcmp(p->kernelname, UI_NO_CONV_KERNEL_NAME) )
+                printf("  - %s: %s (hdu: %s)\n",
+                       p->widekernelname ? "Sharp Kernel" : "Kernel",
+                       p->kernelname, p->khdu);
+              else
+                printf("  - No convolution requested.\n");
+            }
           else
             printf(p->kernel->ndim==2
                    ? "  - %s: FWHM=2 pixel Gaussian.\n"
@@ -945,7 +955,6 @@ ui_free_report(struct noisechiselparams *p, struct timeval *t1)
   /* Free the allocated datasets. */
   gal_data_free(p->sky);
   gal_data_free(p->std);
-  gal_data_free(p->conv);
   gal_data_free(p->wconv);
   gal_data_free(p->input);
   gal_data_free(p->kernel);
@@ -953,6 +962,7 @@ ui_free_report(struct noisechiselparams *p, struct timeval *t1)
   gal_data_free(p->olabel);
   gal_data_free(p->clabel);
   gal_data_free(p->widekernel);
+  if(p->conv!=p->input) gal_data_free(p->conv);
 
   /* Clean up the tile structure. */
   p->ltl.numchannels=NULL;
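
With `--kernel=none', `noisechisel_convolve' above makes `p->conv' a plain
alias of `p->input'; that is why the later steps (and the free in
`ui_free_report') compare the two pointers first. A generic standalone
sketch of the aliasing pattern (hypothetical names, not Gnuastro code):

    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for a real convolution: just return a copy of the input. */
    static float *convolve_copy(const float *in, size_t n)
    {
      float *out=malloc(n*sizeof *out);
      if(out) memcpy(out, in, n*sizeof *out);
      return out;
    }

    int main(void)
    {
      size_t n=1024;
      int do_convolution=0;     /* e.g., the user gave `--kernel=none'. */
      float *input, *conv;

      input=calloc(n, sizeof *input);
      if(input==NULL) return 1;

      /* Either build a separate convolved image, or alias the input. */
      conv = do_convolution ? convolve_copy(input, n) : input;

      /* ... detection/segmentation steps would use `conv' here ... */

      /* Free the convolved image only when it is a separate allocation. */
      if(conv!=input) free(conv);
      free(input);
      return 0;
    }
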
diff --git a/bin/noisechisel/ui.h b/bin/noisechisel/ui.h
index d20c2d3..6e33e94 100644
--- a/bin/noisechisel/ui.h
+++ b/bin/noisechisel/ui.h
@@ -30,6 +30,13 @@ along with Gnuastro. If not, see <http://www.gnu.org/licenses/>.
 
 
 
+/* Macros. */
+#define UI_NO_CONV_KERNEL_NAME "none"
+
+
+
+
+
 /* Option groups particular to this program. */
 enum program_args_groups
 {
diff --git a/doc/announce-acknowledge.txt b/doc/announce-acknowledge.txt
index 8d8f500..7e8767c 100644
--- a/doc/announce-acknowledge.txt
+++ b/doc/announce-acknowledge.txt
@@ -1,8 +1,11 @@
 People who's help must be acknowledged in the next release.
 
+Nima Dehdilani
 Antonio Diaz Diaz
 Guillaume Mahler
 Ole Streicher
 Michel Tallon
 Juan C. Tello
 Éric Thiébaut
+Aaron Watkins
+Sara Yousefi Taemeh
diff --git a/doc/gnuastro.texi b/doc/gnuastro.texi
index 2abbdf5..81db8c5 100644
--- a/doc/gnuastro.texi
+++ b/doc/gnuastro.texi
@@ -13789,12 +13789,15 @@ Gnuastro's options with the @option{--help} option.
 @item -k STR
 @itemx --kernel=STR
 File name of kernel to smooth the image before applying the threshold, see
-@ref{Convolution kernel}. The first step of NoiseChisel is to
-convolve/smooth the image and use the convolved image in multiple steps
-during the processing. It will be used to define (and later apply) the
-quantile threshold (see @option{--qthresh}). The convolved image is also
-used to define the clumps (see Section 3.2.1 and Figure 8 of
-@url{https://arxiv.org/abs/1505.01664, Akhlaghi and Ichikawa [2015]}).
+@ref{Convolution kernel}. If no convolution is needed, give this option a
+value of @option{none}.
+
+The first step of NoiseChisel is to convolve/smooth the image and use the
+convolved image in multiple steps during the processing. It will be used to
+define (and later apply) the quantile threshold (see
+@option{--qthresh}). The convolved image is also used to define the clumps
+(see Section 3.2.1 and Figure 8 of @url{https://arxiv.org/abs/1505.01664,
+Akhlaghi and Ichikawa [2015]}).
 
 To build any kernel in one command (for input into this option), you can
 use MakeProfile's @option{--kernel} option, see @ref{MakeProfiles output
@@ -14737,62 +14740,66 @@ transformations on the labeled image.
 No measurement on a real dataset can be perfect: you can only reach a
 certain level/limit of accuracy. Therefore, a meaningful (scientific)
 analysis requires an understanding of these limits for the dataset and your
-analysis tools: different datasets (images in the case of MakeCatalog) have
-different noise properties and different detection methods (one
-method/algorith/software that is run with a different set of parameters is
-considered as a different detection method) will have different abilities
-to detect or measure certain kinds of signal (astronomical objects) and
-their properties in an image. Hence, quantifying the detection and
-measurement limitations with a particular dataset and analysis tool is the
-most crucial/critical aspect of any high-level analysis.
-
-In this section we discuss some of the most general limits that are very
-important in any astronomical data analysis and how MakeCatalog makes it
-easy to find them. Depending on the higher-level analysis, there are more
-tests that must be done, but these are usually necessary in any case. In
-astronomy, it is common to use the magnitude (a unit-less scale) and
-physical units, see @ref{Flux Brightness and magnitude}. Therefore all the
-measurements discussed here are defined in units of magnitudes.
+analysis tools: different datasets have different noise properties and
+different detection methods (one method/algorithm/software that is run with
+a different set of parameters is considered a different detection
+method) will have different abilities to detect or measure certain kinds of
+signal (astronomical objects) and their properties in the dataset. Hence,
+quantifying the detection and measurement limitations with a particular
+dataset and analysis tool is the most crucial/critical aspect of any
+high-level analysis.
+
+Here, we'll review some of the most general limits that are important in
+any astronomical data analysis and how MakeCatalog makes it easy to find
+them. Depending on the higher-level analysis, there are more tests that
+must be done, but these are relatively low-level and usually necessary in
+most cases. In astronomy, it is common to use the magnitude (a unit-less
+scale) and physical units, see @ref{Flux Brightness and
+magnitude}. Therefore the measurements discussed here are commonly used in
+units of magnitudes.
 
 @table @asis
 
 @item Surface brightness limit (of whole dataset)
 @cindex Surface brightness
 As we make more observations on one region of the sky, and add the
-observations into one dataset, we are able to decrease the standard
-deviation of the noise in each pixel@footnote{This is true for any noisy
-data, not just astronomical images.}. Qualitatively, this decrease
-manifests its self by making fainter (per pixel) parts of the objects in
-the image more visible. Technically, this is known as surface
-brightness. Quantitatively, it increases the Signal to noise ratio, since
-the signal increases faster than noise with more data. It is very important
-to have in mind that here, noise is defined per pixel (or in the units of
-our data measurement), not per object.
+observations into one dataset, the signal and noise both increase. However,
+the signal increases much faster than the noise: assuming you add @mymath{N}
+datasets with equal exposure times, the signal will increase as a multiple
+of @mymath{N}, while the noise increases as @mymath{\sqrt{N}}. This
+increases the signal-to-noise ratio. Qualitatively, fainter (per pixel)
+parts of the objects/signal in the image will become more
+visible/detectable. The noise-level is known as the dataset's surface
+brightness limit.
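To make the @mymath{\sqrt{N}} statement concrete (a one-line derivation that
only restates the assumptions above, added here for illustration): if a
single exposure contributes signal @mymath{s} and noise @mymath{\sigma} to a
pixel, then after adding @mymath{N} equal exposures

@dispmath{{S\over N}={Ns\over\sqrt{N}\,\sigma}=\sqrt{N}\,{s\over\sigma}}

so the signal-to-noise ratio grows as @mymath{\sqrt{N}}.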
 
 You can think of the noise as muddy water that is completely covering a
 flat ground@footnote{The ground is the sky value in this analogy, see
 @ref{Sky value}. Note that this analogy only holds for a flat sky value
-across the surface of the image or ground.} with some regions higher than
-the others@footnote{The peaks are the brightest parts of astronomical
-objects in this analogy.} in it. In this analogy, height (from the ground)
-is @emph{surface brightness}. Let's assume that in your first observation
-the muddy water has just been stirred and you can't see anything through
-it. As you wait and make more observations, the mud settles down and the
-@emph{depth} of the transparent water increases, making the summits of
-hills visible. As the depth of clear water increases, the parts of the
-hills with lower heights (less parts with lower surface brightness) can be
-seen more clearly.
+across the surface of the image or ground.}. The signal (or astronomical
+objects in this analogy) will be summits/hills that start from the flat sky
+level (under the muddy water) and can sometimes reach outside of the muddy
+water. Let's assume that in your first observation the muddy water has just
+been stirred and you can't see anything through it. As you wait and make
+more observations/exposures, the mud settles down and the @emph{depth} of
+the transparent water increases, making the summits visible. As the depth
+of clear water increases, the parts of the hills with lower heights (parts
+with lower surface brightness) can be seen more clearly. In this analogy,
+height (from the ground) is @emph{surface brightness}@footnote{Note that
+this muddy water analogy is not perfect, because while the water level
+remains the same all over a peak, in data analysis the Poisson noise
+increases with the signal level.} and the height of the muddy water is
+your surface brightness limit.
 
 @cindex Data's depth
 The outputs of NoiseChisel include the Sky standard deviation
 (@mymath{\sigma}) on every group of pixels (a mesh) that were calculated
-from the undetected pixels in that mesh, see @ref{Tessellation} and
+from the undetected pixels in each tile, see @ref{Tessellation} and
 @ref{NoiseChisel output}. Let's take @mymath{\sigma_m} as the median
 @mymath{\sigma} over the successful meshes in the image (prior to
 interpolation or smoothing).
 
-On different instruments pixels have different physical sizes (for example
-in micro-meters, or spatial angle over the sky), nevertheless, a pixel is
+On different instruments, pixels have different physical sizes (for example
+in micro-meters, or spatial angle over the sky). Nevertheless, a pixel is
 our unit of data collection. In other words, while quantifying the noise,
 the physical or projected size of the pixels is irrelevant. We thus define
 the Surface brightness limit or @emph{depth}, in units of magnitude/pixel,
@@ -14821,23 +14828,23 @@ the pixel scale, we can obtain a more easily comparable surface brightness
 limit in units of: magnitude/arcsec@mymath{^2}. Let's assume that the
 dataset has a zeropoint value of @mymath{z}, and every pixel is @mymath{p}
 arcsec@mymath{^2} (so @mymath{A/p} is the number of pixels that cover an
-area of @mymath{A} arcsec@mymath{^2}). If the @mymath{n}th multiple of
-@mymath{\sigma_m} is desired, then the surface brightness (in units of
-magnitudes per A arcsec@mymath{^2}) is@footnote{If we have @mymath{N}
-datasets, each with noise @mymath{\sigma}, the noise of a combined dataset
-will increase as @mymath{\sqrt{N}\sigma}.}:
+area of @mymath{A} arcsec@mymath{^2}). If the surface brightness is desired
+at the @mymath{n}th multiple of @mymath{\sigma_m}, the following equation
+(in units of magnitudes per @mymath{A} arcsec@mymath{^2}) can be used:
 
 @dispmath{SB_{\rm Projected}=-2.5\times\log_{10}{\left(n\sigma_m\sqrt{A\over
 p}\right)+z}}
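As a minimal sketch of evaluating this equation (for illustration only:
this function is not part of Gnuastro, its name and arguments are chosen
here for clarity):

  #include <math.h>

  /* Projected surface brightness limit from the equation above: `n' is
     the desired multiple of the median per-pixel Sky standard deviation
     `sigma_m', `area' is the reference area (A) in arcsec^2, `pixarea'
     is the area of one pixel (p) in arcsec^2 and `z' is the zeropoint
     magnitude. */
  static double
  sb_projected(double n, double sigma_m, double area, double pixarea,
               double z)
  {
    return -2.5 * log10( n * sigma_m * sqrt(area/pixarea) ) + z;
  }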
 
-Note that this is an extrapolation of the actually measured value of
-@mymath{\sigma_m} (which was per pixel). So it should be used with extreme
-care (for example the dataset must have an approximately flat depth). For
-each detection over the dataset, you can estimate an upper-limit magnitude
-which actually uses the detection's area/footprint. It doesn't extrapolate
-and even accounts for correlated noise features. Therefore, the upper-limit
-magnitude is a much better measure of your dataset's surface brightness
-limit for each particular object.
+Note that this is just an extrapolation of the per-pixel measurement
+@mymath{\sigma_m}. So it should be used with extreme care: for example the
+dataset must have an approximately flat depth or noise properties
+overall. A more accurate measure for each detection over the dataset is
+known as the @emph{upper-limit magnitude} which actually uses random
+positioning of each detection's area/footprint (see below). It doesn't
+extrapolate and even accounts for correlated noise patterns in relation to
+that detection. Therefore, the upper-limit magnitude is a much better
+measure of your dataset's surface brightness limit for each particular
+object.
 
 MakeCatalog will calculate the input dataset's @mymath{SB_{\rm Pixel}} and
 @mymath{SB_{\rm Projected}} and write them as comments/meta-data in the
@@ -14851,11 +14858,11 @@ them will also decrease. An important statistic is thus the fraction of
 objects of similar morphology and brightness that will be identified with
 our detection algorithm/parameters in the given image. This fraction is
 known as completeness. For brighter objects, completeness is 1: all bright
-objects that might exist over the image will be detected. However, as we
-go to lower surface brightness objects, we fail to detect some and
-gradually we are not able to detect anything any more. For a given profile,
-the magnitude where the completeness drops below a certain level usually
-above @mymath{90\%} is known as the completeness limit.
+objects that might exist over the image will be detected. However, as we go
+to objects of lower overall surface brightness, we will fail to detect
+some, and gradually become unable to detect anything at all. For a
+given profile, the magnitude where the completeness drops below a certain
+level (usually above @mymath{90\%}) is known as the completeness limit.
 
 @cindex Purity
 @cindex False detections
@@ -14910,49 +14917,54 @@ magnitude error afterwards for any type of target.
 @item Upper limit magnitude (of each detection)
 Due to the noisy nature of data, it is possible to get arbitrarily low
 values for a faint object's brightness (or arbitrarily high
-magnitudes). Given the scatter caused by the noise, such small values are
-meaningless: another similar depth observation will give a radically
-different value. This problem is most common when you use one image/filter
-to generate target labels (which specify which pixels belong to which
-object, see @ref{NoiseChisel output} and @ref{MakeCatalog}) and another
-image/filter to generate a catalog for measuring colors.
-
-The object might not be visible in the filter used for the latter image, or
-the image @emph{depth} (see above) might be much shallower. So you will get
-unreasonably faint magnitudes. For example when the depth of the image is 32
-magnitudes, a measurement that gives a magnitude of 36 for a
-@mymath{\sim100} pixel object is clearly unreliable. In another similar
-depth image, we might measure a magnitude of 30 for it, and yet another
-might give 33. Furthermore, due to the noise scatter so close to the depth
-of the data-set, the total brightness might actually get measured as a
-negative value, so no magnitude can be defined (recall that a magnitude is
-a base-10 logarithm).
+@emph{magnitudes}). Given the scatter caused by the dataset's noise, values
+fainter than a certain level are meaningless: another similar depth
+observation will give a radically different value. This problem usually
+becomes relevant when the detection and measurement images are not the same
+(for example when you are estimating colors, see @ref{NoiseChisel output}).
+
+For example, when the depth of the image is 32 magnitudes/pixel, a
+measurement that gives a magnitude of 36 for a @mymath{\sim100} pixel
+object is clearly unreliable. In another similar depth image, we might
+measure a magnitude of 30 for it, and yet another might give
+33. Furthermore, due to the noise scatter so close to the depth of the
+dataset, the total brightness might actually get measured as a negative
+value, so no magnitude can be defined (recall that a magnitude is a base-10
+logarithm).
 
 @cindex Upper limit magnitude
 @cindex Magnitude, upper limit
 Using such unreliable measurements will directly affect our analysis, so we
-must not use them. However, all is not lost! Given our limited depth, there
-is one thing we can deduce about the object's magnitude: we can say that if
-something actually exists here (possibly buried deep under the noise), it
-must have a magnitude that is fainter than an @emph{upper limit
-magnitude}. To find this upper limit magnitude, we place the object's
-footprint (segmentation map) over random parts of the image where there are
-no detections, so we only have pure (possibly correlated) noise and
-undetected objects. Doing this a large number of times will give us a
-distribution of brightness values. The standard deviation (@mymath{\sigma})
-of that distribution can be used to quantify the upper limit magnitude.
+must not use the raw measurements. However, all is not lost! Given our
+limited depth, there is one thing we can deduce about the object's
+magnitude: we can say that if something actually exists here (possibly
+buried deep under the noise), it must have a magnitude that is fainter than
+an @emph{upper limit magnitude}. To find this upper limit magnitude, we
+place the object's footprint (segmentation map) over random parts of the
+image where there are no detections, so we only have pure (possibly
+correlated) noise and undetected objects. Doing this a large number of
+times will give us a distribution of brightness values. The standard
+deviation (@mymath{\sigma}) of that distribution can be used to quantify
+the upper limit magnitude.
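The following is a minimal sketch of the measurement just described (for
illustration only, this is not MakeCatalog's implementation; the function
name and the caller-supplied array of randomly placed brightness sums are
assumptions):

  #include <math.h>
  #include <stddef.h>

  /* Given `nup' brightness sums, each measured over one random
     placement of the object's footprint on undetected pixels, return
     the upper limit magnitude as `upnsigma' multiples of the
     distribution's standard deviation, using the zeropoint `z'. */
  static double
  upperlimit_mag(double *sums, size_t nup, double upnsigma, double z)
  {
    size_t i;
    double mean=0.0, sum2=0.0;

    for(i=0; i<nup; ++i) mean += sums[i];
    mean /= nup;

    for(i=0; i<nup; ++i) sum2 += (sums[i]-mean) * (sums[i]-mean);

    return -2.5 * log10( upnsigma * sqrt(sum2/(nup-1)) ) + z;
  }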
 
 @cindex Correlated noise
 Traditionally, faint/small object photometry was done using fixed circular
-apertures (for example with a diameter of @mymath{N} arc-seconds). In this
-way, the upper limit was like the depth discussed above: one value for the
-whole image. But with the much more advanced hardware and software of
-today, we can make customized segmentation maps for each object. The number
-of pixels (are of the object) used directly affects the final distribution
-and thus magnitude. Also the image correlated noise might actually create
-certain patters, so the shape of the object can also affect the result. So
-in MakeCatalog, the upper limit magnitude is found for each object in the
-image separately. Not one value for the whole image.
+apertures (for example with a diameter of @mymath{N} arc-seconds). Hence,
+the upper limit was like the depth discussed above: one value for the whole
+image. The problem with this simplified approach is that the number of
+pixels in the aperture directly affects the final distribution and thus
+magnitude. Also, the image's correlated noise might actually create certain
+patterns, so the shape of the object can also affect the final
+result. Fortunately, with the much more advanced hardware and software of
+today, we can make customized segmentation maps for each object.
+
+If requested, MakeCatalog will estimate the upper limit magnitude for each
+object in the image separately; the procedure is fully configurable with
+the options in @ref{Upper-limit magnitude settings}. If one value for the
+whole image is required, you can either use the surface brightness limit
+above, or make a circular aperture and feed it into MakeCatalog to request
+an upper-limit magnitude for it.
 
 @end table
 
@@ -15820,13 +15832,34 @@ magnitude settings} for a complete explanation. This is very important for
 the fainter and smaller objects in the image where the measured magnitudes
 are not reliable.
 
-
 @item --upperlimitmag
 The upper limit magnitude for this object or clump. See @ref{Quantifying
 measurement limits} and @ref{Upper-limit magnitude settings} for a complete
 explanation. This is very important for the fainter and smaller objects in
 the image where the measured magnitudes are not reliable.
 
+@item --upperlimitonesigma
+The @mymath{1\sigma} upper limit value (in units of the input image) for
+this object or clump. See @ref{Quantifying measurement limits} and
+@ref{Upper-limit magnitude settings} for a complete explanation. When
+@option{--upnsigma=1}, this column's values will be the same as
+@option{--upperlimit}.
+
+@item --upperlimitsigma
+The position of the total brightness measured within the distribution of
+randomly placed upper-limit measurements in units of the distribution's
+@mymath{\sigma} or standard deviation. See @ref{Quantifying measurement
+limits} and @ref{Upper-limit magnitude settings} for a complete
+explanation.
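In other words (a plausible way to write what this column reports; the
symbols below are only introduced here for illustration), if @mymath{B} is
the measured brightness and @mymath{\mu_{\rm up}} and
@mymath{\sigma_{\rm up}} are the mean and standard deviation of the
randomly placed measurements, the reported value is approximately

@dispmath{{B-\mu_{\rm up}\over\sigma_{\rm up}}}

while @option{--upperlimitquantile} (next) reports the quantile of
@mymath{B} within that same distribution.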
+
+@item --upperlimitquantile
+The position of the total brightness measured within the distribution of
+randomly placed upper-limit measurements as a quantile (a value between 0
+and 1). See @ref{Quantifying measurement limits} and @ref{Upper-limit magnitude
+settings} for a complete explanation. If the object is brighter than the
+brightest randomly placed profile, a value of @code{inf} is returned. If it
+is less than the minimum, a value of @code{-inf} is reported.
+
 @item --riverave
 [Clumps] The average brightness of the river pixels around this
 clump. River pixels were defined in Akhlaghi and Ichikawa 2015. In
@@ -16988,19 +17021,25 @@ format}.
 @itemize
 @item
 S@'ersic profile with `@code{sersic}' or `@code{1}'.
+
 @item
 Moffat profile with `@code{moffat}' or `@code{2}'.
+
 @item
 Gaussian profile with `@code{gaussian}' or `@code{3}'.
+
 @item
 Point source with `@code{point}' or `@code{4}'.
+
 @item
 Flat profile with `@code{flat}' or `@code{5}'.
+
 @item
 Circumference profile with `@code{circum}' or `@code{6}'. A fixed
 value will be used for all pixels between the truncation radius
 (@mymath{r_t}) and @mymath{r_t-w} (@mymath{w} is the value to the
 @option{--circumwidth}).
+
 @item
 Radial distance profile with `@code{distance}' or `@code{7}'. At the lowest
@@ -20098,12 +20137,13 @@ freed after you are done with it.
 Astronomical datasets have various dimensions, for example 1D spectra or
 table columns, 2D images, or 3D Integral field data cubes. Datasets can
 also have various numeric data types, depending on the operation/purpose,
-for example processed images are in commonly in floating point, but their
-mask images are integers using bit-wise flags to identify certain classes
-of special pixels, see @ref{Numeric data types}). Certain other information
-about a dataset are also commonly necessary, for example the units of the
-dataset, the name of the dataset and some comments. To deal with any
-generic dataset, Gnuastro defines the @code{gal_data_t} as input or output.
+for example processed images are commonly stored in floating point format,
+but their mask images are integers (allowing bit-wise flags to identify
+certain classes of pixels to keep or mask, see @ref{Numeric data
+types}). Certain other information about a dataset is also commonly
+necessary, for example the units of the dataset, the name of the dataset
+and some comments. To deal with any generic dataset, Gnuastro defines the
+@code{gal_data_t} as input or output.
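As a minimal sketch of creating and freeing one such container (based only
on the @code{gal_data_alloc} call pattern that appears in this commit's own
@file{lib/statistics.c} changes; the name `MEDIAN' and unit `counts' are
arbitrary examples):

  #include <gnuastro/data.h>

  /* Allocate a cleared, single-element float64 dataset with a name,
     unit and comment, then free it. */
  void
  example_container(void)
  {
    size_t dsize=1;
    gal_data_t *data=gal_data_alloc(NULL, GAL_TYPE_FLOAT64, 1, &dsize,
                                    NULL, 1, -1, "MEDIAN", "counts",
                                    "Example single-element dataset");
    gal_data_free(data);
  }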
 
 @menu
 * Generic data container::      Definition of Gnuastro's generic container.
@@ -20487,11 +20527,11 @@ simplify the allocation (and later cleaning) of several @code{gal_data_t}s
 that are related.
 
 For example, each column in a table is usually represented by one
-@code{gal_data_t} (so it has its own name, numeric data type, units and
-etc). A table (with many columns) can be seen as an array of
-@code{gal_data_t}s (when the number of columns is known a-priori). The
-functions below are defined to create a cleared array of data structures
-and to free them in the end. These functions are declared in
+@code{gal_data_t} (so it has its own name, data type, units and so on). A
+table (with many columns) can be seen as an array of @code{gal_data_t}s
+(when the number of columns is known a-priori). The functions below are
+defined to create a cleared array of data structures and to free them when
+they are no longer necessary. These functions are declared in
 @file{gnuastro/data.h} which is also visible from the function names (see
 @ref{Gnuastro library}).
 
@@ -23898,16 +23938,18 @@ datatype of the output is the same as @code{input}. See
 Return the index of the quantile function (inverse quantile) of
 @code{input} at @code{value}. In other words, this function will return the
 index of the nearest element (of a sorted and non-blank) @code{input} to
-@code{value}. See @code{gal_statistics_median} for a description of
-@code{inplace}.
+@code{value}. If the value is outside the range of the input, then this
+function will return @code{GAL_BLANK_SIZE_T}.
 @end deftypefun
 
 @deftypefun {gal_data_t *} gal_statistics_quantile_function (gal_data_t @code{*input}, gal_data_t @code{*value}, int @code{inplace})
 Return a single-element (@code{double} or @code{float64}) dataset
 containing the quantile function of the non-blank values in @code{input} at
 @code{value}. In other words, this function will return the quantile of
-@code{value} in @code{input}. See @code{gal_statistics_median} for a
-description of @code{inplace}.
+@code{value} in @code{input}. If the value is smaller than the input's
+smallest element, the returned value will be @code{-inf}. If the value is
+larger than the input's largest element, the returned value will be
+@code{inf}. See @code{gal_statistics_median} for a description of
+@code{inplace}.
 @end deftypefun
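A minimal usage sketch for this function (illustration only; it just
follows the signature and return convention documented above, assuming
@code{input} is a @code{float32} dataset and that the declarations are
available through the headers named below):

  #include <stdio.h>
  #include <gnuastro/data.h>
  #include <gnuastro/statistics.h>

  /* Print the quantile of a single float32 value within `input'. The
     returned single-element float64 dataset holds a value between 0
     and 1 inside the input's range, or +/-inf outside of it. */
  void
  print_quantile(gal_data_t *input, float val)
  {
    size_t one=1;
    gal_data_t *quant;
    gal_data_t *value=gal_data_alloc(NULL, GAL_TYPE_FLOAT32, 1, &one,
                                     NULL, 1, -1, NULL, NULL, NULL);

    ((float *)(value->array))[0]=val;       /* Write the query value. */
    quant=gal_statistics_quantile_function(input, value, 0);
    printf("quantile: %g\n", ((double *)(quant->array))[0]);

    gal_data_free(value);
    gal_data_free(quant);
  }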
 
 @deftypefun {gal_data_t *} gal_statistics_mode (gal_data_t @code{*input}, float @code{mirrordist}, int @code{inplace})
diff --git a/lib/fits.c b/lib/fits.c
index 939d816..aadd92e 100644
--- a/lib/fits.c
+++ b/lib/fits.c
@@ -2396,12 +2396,23 @@ gal_fits_tab_read(char *filename, char *hdu, size_t numrows,
           }
 
       /* Allocate a blank value for the given type and read/store the
-         column using CFITSIO. Afterwards, free the blank value. */
-      blank=gal_blank_alloc_write(out->type);
+         column using CFITSIO. Afterwards, free the blank value. Note that
+         we only need blank values for integer types. For floating point
+         types, the FITS standard defines blanks as NaN (same as almost any
+         other software like Gnuastro). However if a blank value is
+         specified, CFITSIO will convert other special numbers like `inf'
+         to NaN also. We want to be able to distinguish `inf' and NaN
+         here, so for floating point types, we won't define any blank
+         value. */
+      blank = ( (out->type==GAL_TYPE_FLOAT32 || out->type==GAL_TYPE_FLOAT64)
+                ? NULL
+                : gal_blank_alloc_write(out->type) );
       fits_read_col(fptr, gal_fits_type_to_datatype(out->type), ind->v+1,
                     1, 1, out->size, blank, out->array, &anynul, &status);
       gal_fits_io_error(status, NULL);
-      free(blank);
+
+      /* Clean up. */
+      if(blank) free(blank);
     }
 
   /* Close the FITS file */
@@ -2707,7 +2718,7 @@ gal_fits_tab_write(gal_data_t *cols, gal_list_str_t *comments,
   fptr=gal_fits_open_to_write(filename);
 
 
-  /* prepare necessary arrays and if integer type columns have blank
+  /* Prepare necessary arrays and if integer type columns have blank
      values, write the TNULLn keywords into the FITS file. */
   fits_table_prepare_arrays(cols, numcols, tableformat,
                             &tform, &ttype, &tunit);
diff --git a/lib/statistics.c b/lib/statistics.c
index ce71667..6c19f36 100644
--- a/lib/statistics.c
+++ b/lib/statistics.c
@@ -353,29 +353,44 @@ gal_statistics_quantile(gal_data_t *input, double quantile, int inplace)
 /* Return the index of the (first) point in the sorted dataset that has the
    closest value to `value' (which has to be the same type as the `input'
    dataset). */
-#define STATS_QFUNC(IT) {                                               \
+#define STATS_QFUNC_IND(IT) {                                           \
     IT *r, *a=nbs->array, *af=a+nbs->size, v=*((IT *)(value->array));   \
                                                                         \
-    /* For a reference. Since we are comparing with the previous. */    \
+    /* For a reference. Since we are comparing with the previous */     \
     /* element, we need to start with the second element.*/             \
     r=a++;                                                              \
                                                                         \
     /* Increasing array: */                                             \
     if( *a < *(a+1) )                                                   \
-      do if(*a>v) { if( v - *(a-1) < *a - v ) --a; break; } while(++a<af); \
+      {                                                                 \
+        if( v>=*r )                                                     \
+          {                                                             \
+            do if(*a>v) { if( v - *(a-1) < *a - v ) --a; break; }       \
+            while(++a<af);                                              \
+            parsed=1;                                                   \
+          }                                                             \
+      }                                                                 \
                                                                         \
     /* Decreasing array. */                                             \
     else                                                                \
-      do if(*a<v) { if( *(a-1) - v < v - *a ) --a; break; } while(++a<af); \
+      {                                                                 \
+        if(v<=*r)                                                       \
+          {                                                             \
+            do if(*a<v) { if( *(a-1) - v < v - *a ) --a; break; }       \
+            while(++a<af);                                              \
+            parsed=1;                                                   \
+          }                                                             \
+      }                                                                 \
                                                                         \
-    /* Set the difference. */                                           \
-    if(a<af) index=a-r;                                                 \
+    /* Set the difference if the value is actually in the range. */     \
+    if(parsed && a<af) index = a-r;                                     \
   }
 size_t
 gal_statistics_quantile_function_index(gal_data_t *input, gal_data_t *value,
                                        int inplace)
 {
-  size_t index=-1;
+  int parsed=0;
+  size_t index=GAL_BLANK_SIZE_T;
   gal_data_t *nbs=gal_statistics_no_blank_sorted(input, inplace);
 
   /* A sanity check. */
@@ -386,16 +401,16 @@ gal_statistics_quantile_function_index(gal_data_t *input, gal_data_t *value,
   /* Find the result: */
   switch(nbs->type)
     {
-    case GAL_TYPE_UINT8:     STATS_QFUNC( uint8_t  );     break;
-    case GAL_TYPE_INT8:      STATS_QFUNC( int8_t   );     break;
-    case GAL_TYPE_UINT16:    STATS_QFUNC( uint16_t );     break;
-    case GAL_TYPE_INT16:     STATS_QFUNC( int16_t  );     break;
-    case GAL_TYPE_UINT32:    STATS_QFUNC( uint32_t );     break;
-    case GAL_TYPE_INT32:     STATS_QFUNC( int32_t  );     break;
-    case GAL_TYPE_UINT64:    STATS_QFUNC( uint64_t );     break;
-    case GAL_TYPE_INT64:     STATS_QFUNC( int64_t  );     break;
-    case GAL_TYPE_FLOAT32:   STATS_QFUNC( float    );     break;
-    case GAL_TYPE_FLOAT64:   STATS_QFUNC( double   );     break;
+    case GAL_TYPE_UINT8:     STATS_QFUNC_IND( uint8_t  );     break;
+    case GAL_TYPE_INT8:      STATS_QFUNC_IND( int8_t   );     break;
+    case GAL_TYPE_UINT16:    STATS_QFUNC_IND( uint16_t );     break;
+    case GAL_TYPE_INT16:     STATS_QFUNC_IND( int16_t  );     break;
+    case GAL_TYPE_UINT32:    STATS_QFUNC_IND( uint32_t );     break;
+    case GAL_TYPE_INT32:     STATS_QFUNC_IND( int32_t  );     break;
+    case GAL_TYPE_UINT64:    STATS_QFUNC_IND( uint64_t );     break;
+    case GAL_TYPE_INT64:     STATS_QFUNC_IND( int64_t  );     break;
+    case GAL_TYPE_FLOAT32:   STATS_QFUNC_IND( float    );     break;
+    case GAL_TYPE_FLOAT64:   STATS_QFUNC_IND( double   );     break;
     default:
       error(EXIT_FAILURE, 0, "%s: type code %d not recognized",
             __func__, nbs->type);
@@ -411,12 +426,24 @@ gal_statistics_quantile_function_index(gal_data_t *input, gal_data_t *value,
 
 
 /* Return the quantile function of the given value as float64. */
+#define STATS_QFUNC(IT) {                                               \
+    IT *a=nbs->array, v=*((IT *)(value->array));                        \
+                                                                        \
+    /* Increasing array: */                                             \
+    if( *a < *(a+1) )                                                   \
+      d[0] = v<*a ? -INFINITY : INFINITY;                               \
+                                                                        \
+    /* Decreasing array. */                                             \
+    else                                                                \
+      d[0] = v>*a ? INFINITY : -INFINITY;                               \
+  }
 gal_data_t *
 gal_statistics_quantile_function(gal_data_t *input, gal_data_t *value,
                                  int inplace)
 {
   double *d;
   size_t dsize=1;
+  gal_data_t *nbs=gal_statistics_no_blank_sorted(input, inplace);
   gal_data_t *out=gal_data_alloc(NULL, GAL_TYPE_FLOAT64, 1, &dsize,
                                  NULL, 1, -1, NULL, NULL, NULL);
   size_t ind=gal_statistics_quantile_function_index(input, value, inplace);
@@ -424,7 +451,32 @@ gal_statistics_quantile_function(gal_data_t *input, gal_data_t *value,
   /* Note that counting of the index starts from 0, so for the quantile we
      should divided by (size - 1). */
   d=out->array;
-  d[0] = ( ind==-1 ? NAN : ((double)ind) / ((double)(input->size - 1)) );
+  if(ind==GAL_BLANK_SIZE_T)
+    {
+      /* See if the value is larger or smaller than the input's minimum or
+         maximum. */
+      switch(nbs->type)
+        {
+        case GAL_TYPE_UINT8:     STATS_QFUNC( uint8_t  );     break;
+        case GAL_TYPE_INT8:      STATS_QFUNC( int8_t   );     break;
+        case GAL_TYPE_UINT16:    STATS_QFUNC( uint16_t );     break;
+        case GAL_TYPE_INT16:     STATS_QFUNC( int16_t  );     break;
+        case GAL_TYPE_UINT32:    STATS_QFUNC( uint32_t );     break;
+        case GAL_TYPE_INT32:     STATS_QFUNC( int32_t  );     break;
+        case GAL_TYPE_UINT64:    STATS_QFUNC( uint64_t );     break;
+        case GAL_TYPE_INT64:     STATS_QFUNC( int64_t  );     break;
+        case GAL_TYPE_FLOAT32:   STATS_QFUNC( float    );     break;
+        case GAL_TYPE_FLOAT64:   STATS_QFUNC( double   );     break;
+        default:
+          error(EXIT_FAILURE, 0, "%s: type code %d not recognized",
+                __func__, nbs->type);
+        }
+    }
+  else
+    d[0] = (double)ind / ((double)(nbs->size - 1));
+
+  /* Clean up and return. */
+  if(nbs!=input) gal_data_free(nbs);
   return out;
 }
 


