gnuastro-commits
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[gnuastro-commits] master d0d8d20 109/113: Imported recent work in master, minor conflicts fixed


From: Mohammad Akhlaghi
Subject: [gnuastro-commits] master d0d8d20 109/113: Imported recent work in master, minor conflicts fixed
Date: Fri, 16 Apr 2021 10:34:02 -0400 (EDT)

branch: master
commit d0d8d20c9d96e862f4e437c2db341c1cfa48cdee
Merge: 3f51e31 6b59be2
Author: Mohammad Akhlaghi <mohammad@akhlaghi.org>
Commit: Mohammad Akhlaghi <mohammad@akhlaghi.org>

    Imported recent work in master, minor conflicts fixed
    
    Some minor conflicts came up during the merge that are now fixed.
---
 NEWS                         |   20 +
 THANKS                       |    1 +
 bin/arithmetic/arithmetic.c  |   10 +-
 bin/crop/ui.c                |    9 +-
 bin/crop/wcsmode.c           |    5 +-
 bin/mkprof/mkprof.c          |   18 +-
 bin/mkprof/oneprofile.c      |    7 +-
 bin/mkprof/oneprofile.h      |    2 +-
 bin/noisechisel/threshold.c  |    6 +-
 bin/warp/ui.c                |    7 +-
 configure.ac                 |   87 +-
 doc/Makefile.am              |    3 +-
 doc/announce-acknowledge.txt |    2 +
 doc/gnuastro.texi            | 2336 ++++++++++++++++++++++--------------------
 lib/Makefile.am              |    3 +-
 lib/dimension.c              |    2 +-
 lib/qsort.c                  |   57 +-
 17 files changed, 1383 insertions(+), 1192 deletions(-)

diff --git a/NEWS b/NEWS
index 2d2a501..9756ca1 100644
--- a/NEWS
+++ b/NEWS
@@ -23,6 +23,13 @@ See the end of the file for license conditions.
      dataset and returns a single-dimension output, containing only the
      unique values in the dataset.
 
+  Crop:
+   - Can also crop 3D datasets (data cubes). A 3D crop has the same syntax
+     as the old 2D mode, only when the dataset is 3D, three coordinates
+     (values, ranges or catalog-columns) should be given to the relevant
+     option. Just note that `--polygon' crops are still not supported in
+     3D.
+
   CosmicCalculator:
    --obsline: alternative way to set the used redshift. With this option
      instead of explicitly giving the redshift, you can give a rest-frame
@@ -126,6 +133,15 @@ See the end of the file for license conditions.
        - gal_tiff_read
        - gal_txt_image_read
 
+  Book:
+   - The two larger tutorials ("General program usage tutorial", and
+     "Detecting large extended targets") have been broken into subsections
+     for easier readability.
+   - The "Hubble visually checks and classifies his catalog" tutorial has
+     been removed because it didn't come with a dataset, so it was hard for
+     people to use. Also, all its operations were already present in the
+     general tutorial.
+
 ** Bugs fixed
   bug #56195: astscript-sort-by-night crashing because of AWK.
   bug #56246: Single-valued measurement must abort with no value in Statistics.
@@ -135,6 +151,10 @@ See the end of the file for license conditions.
   bug #56324: Column metadata not usable when input is from pipe/stdin.
   bug #56424: Warp crashes with empty string given to options.
   bug #56480: Segfault in statistics library's histogram function.
+  bug #56641: MakeProfile's center position changes based on precision.
+  bug #56635: Update tutorial 3 with bug-fixed NoiseChisel.
+  bug #56662: Converting -R to -Wl,-R causes a crash in configure on macOS.
+  bug #56671: Bad sorting with asttable if nan is present.
 
 
 
diff --git a/THANKS b/THANKS
index dbc5c88..0420e36 100644
--- a/THANKS
+++ b/THANKS
@@ -65,6 +65,7 @@ support in Gnuastro. The list is ordered alphabetically (by 
family name).
     Elham Saremi                         saremi@ipm.ir
     Yahya Sefidbakht                     y.sefidbakht@gmail.com
     Alejandro Serrano Borlaff            asborlaff@ucm.es
+    Zahra Sharbaf                        samaeh.sharbaf2@yahoo.com
     Jenny Sorce                          jenny.sorce@univ-lyon1.fr
     Lee Spitler                          lee.spitler@mq.edu.au
     Richard Stallman                     rms@gnu.org
diff --git a/bin/arithmetic/arithmetic.c b/bin/arithmetic/arithmetic.c
index b14f7e3..2a370c5 100644
--- a/bin/arithmetic/arithmetic.c
+++ b/bin/arithmetic/arithmetic.c
@@ -758,20 +758,24 @@ arithmetic_collapse(struct arithmeticparams *p, char 
*token, int operator)
 
   /* Small sanity check. */
   if( dimension->ndim!=1 || dimension->size!=1)
-    error(EXIT_FAILURE, 0, "First popped operand of `collapse-*' operators "
+    error(EXIT_FAILURE, 0, "first popped operand of `collapse-*' operators "
           "(dimension to collapse) must be a single number (single-element, "
           "one-dimensional dataset). But it has %zu dimension(s) and %zu "
           "element(s).", dimension->ndim, dimension->size);
   if(dimension->type==GAL_TYPE_FLOAT32 || dimension->type==GAL_TYPE_FLOAT64)
-    error(EXIT_FAILURE, 0, "First popped operand of `collapse-*' operators "
+    error(EXIT_FAILURE, 0, "first popped operand of `collapse-*' operators "
           "(dimension to collapse) must have an integer type, but it has "
           "a floating point type (`%s')", gal_type_name(dimension->type,1));
   dimension=gal_data_copy_to_new_type_free(dimension, GAL_TYPE_LONG);
   dim=((long *)(dimension->array))[0];
   if(dim<0 || dim==0)
-    error(EXIT_FAILURE, 0, "First popped operand of `collapse-*' operators "
+    error(EXIT_FAILURE, 0, "first popped operand of `collapse-*' operators "
           "(dimension to collapse) must be positive (larger than zero), it "
           "is %ld", dim);
+  if(dim > input->ndim)
+    error(EXIT_FAILURE, 0, "input dataset to `%s' has %zu dimension(s), "
+          "but you have asked to collapse along dimension %zu", token,
+          input->ndim, dim);
 
 
   /* If a WCS structure has been read, we'll need to pass it to
diff --git a/bin/crop/ui.c b/bin/crop/ui.c
index 429d30a..939826b 100644
--- a/bin/crop/ui.c
+++ b/bin/crop/ui.c
@@ -937,12 +937,11 @@ ui_preparations(struct cropparams *p)
       if(p->mode==IMGCROP_MODE_WCS) wcsmode_check_prepare(p, img);
     }
 
-  /***************************************************/
-  /********** Until 3D is fully implemented **********/
+  /* Polygon cropping is currently only supported on 2D */
   if(p->imgs->ndim!=2 && p->polygon)
-    error(EXIT_FAILURE, 0, "%s: currently only 2D datasets are "
-          "usable with polygon cropping", p->imgs->name);
-  /***************************************************/
+    error(EXIT_FAILURE, 0, "%s: polygon cropping is currently only "
+          "supported on 2D datasets (images), not %zuD datasets",
+          p->imgs->name, p->imgs->ndim);
 
 
   /* Unify central crop methods into `p->centercoords'. */
diff --git a/bin/crop/wcsmode.c b/bin/crop/wcsmode.c
index aa56a30..f57ef3d 100644
--- a/bin/crop/wcsmode.c
+++ b/bin/crop/wcsmode.c
@@ -279,8 +279,9 @@ wcsmode_crop_corners(struct onecropparams *crp)
     {
       /* A small sanity check. */
       if(ndim!=2)
-        error(EXIT_FAILURE, 0, "%s: currently only supports 2D datasets, "
-              "your input dataset has %zu dimensions", __func__, ndim);
+        error(EXIT_FAILURE, 0, "%s: polygon crops are currently only "
+              "supported on 2D datasets, the input dataset is %zuD",
+              __func__, ndim);
 
       /* Find their minimum and maximum values. */
       for(i=0;i<p->nvertices;++i)
diff --git a/bin/mkprof/mkprof.c b/bin/mkprof/mkprof.c
index 41a6020..2d30e99 100644
--- a/bin/mkprof/mkprof.c
+++ b/bin/mkprof/mkprof.c
@@ -345,7 +345,7 @@ mkprof_build_single(struct mkonthread *mkp, long *fpixel_i, 
long *lpixel_i,
      overlapping region. */
   if(p->out)
     {
-      /* Note that `fpixel_o' and `lpixel_o' were in the un-oversampled
+      /* Note that `fpixel_i' and `lpixel_o' were in the un-oversampled
          image, they are also in the FITS coordinates. */
       for(i=0;i<ndim;++i)
         {
@@ -489,20 +489,20 @@ mkprof_build(void *inparam)
   /* Make each profile that was specified for this thread. */
   for(i=0; mkp->indexs[i]!=GAL_BLANK_SIZE_T; ++i)
     {
-      /* Create a new builtqueue element with all the information. fbq
-         will be used when we want to add ibq to p->bq. It is defined
-         so we don't have to waste time traversing the ibq. Its
-         characteristic compared to the other elements of ibq is that
-         fbq->next==NULL. So to add ibq to p->bq, we just have to set
-         fbq->next=p->bq and then set p->bq to ibq.*/
+      /* Create a new builtqueue element with all the information. `fbq'
+         will be used when we want to add `ibq' to `p->bq'. It is defined
+         so we don't have to waste time traversing the `ibq'. Its
+         characteristic compared to the other elements of `ibq' is that
+         `fbq->next==NULL'. So to add ibq to p->bq, we just have to set
+         `fbq->next=p->bq' and then set `p->bq' to `ibq'.*/
       builtqueue_addempty(&mkp->ibq);
       ibq=mkp->ibq;
       id=ibq->id=mkp->indexs[i];
       if(fbq==NULL) fbq=ibq;
 
 
-      /* Write the necessary parameters for this profile into mkp.*/
-      oneprof_set_prof_params(mkp);
+      /* Write the necessary parameters for this profile into `mkp'.*/
+      oneprofile_set_prof_params(mkp);
 
 
       /* Find the bounding box size (NOT oversampled). */
diff --git a/bin/mkprof/oneprofile.c b/bin/mkprof/oneprofile.c
index db79d3f..0aacb3c 100644
--- a/bin/mkprof/oneprofile.c
+++ b/bin/mkprof/oneprofile.c
@@ -62,19 +62,18 @@ oneprofile_center_oversampled(struct mkonthread *mkp)
 {
   struct mkprofparams *p=mkp->p;
 
-  double *dim;
   long os=p->oversample;
+  double *dim, r=1000000;
   size_t i, id=mkp->ibq->id;
   double val, pixfrac, intpart;
 
   for(i=0;i<p->ndim;++i)
     {
       dim = i==0 ? p->x : (i==1 ? p->y : p->z);
-
       pixfrac = modf(fabs(dim[id]), &intpart);
       val     = ( os*(mkp->width[i]/2 + pixfrac)
                   + (pixfrac<0.5f ? os/2 : -1*os/2-1) );
-      mkp->center[i] = round(val*100)/100;
+      mkp->center[i] = round(val*r)/r;
     }
 }
 
@@ -539,7 +538,7 @@ oneprofile_ispsf(uint8_t fcode)
 
 /* Prepare all the parameters for any type of profile. */
 void
-oneprof_set_prof_params(struct mkonthread *mkp)
+oneprofile_set_prof_params(struct mkonthread *mkp)
 {
   struct mkprofparams *p=mkp->p;
 
diff --git a/bin/mkprof/oneprofile.h b/bin/mkprof/oneprofile.h
index acc7aea..e38b61e 100644
--- a/bin/mkprof/oneprofile.h
+++ b/bin/mkprof/oneprofile.h
@@ -29,7 +29,7 @@ int
 oneprofile_ispsf(uint8_t fcolvalue);
 
 void
-oneprof_set_prof_params(struct mkonthread *mkp);
+oneprofile_set_prof_params(struct mkonthread *mkp);
 
 void
 oneprofile_make(struct mkonthread *mkp);
diff --git a/bin/noisechisel/threshold.c b/bin/noisechisel/threshold.c
index 2d4fa02..41f3e12 100644
--- a/bin/noisechisel/threshold.c
+++ b/bin/noisechisel/threshold.c
@@ -577,9 +577,9 @@ void
 threshold_quantile_find_apply(struct noisechiselparams *p)
 {
   char *msg;
+  size_t nval;
   gal_data_t *num;
   struct timeval t1;
-  size_t nval, nblank;
   struct qthreshparams qprm;
   struct gal_options_common_params *cp=&p->cp;
   struct gal_tile_two_layer_params *tl=&cp->tl;
@@ -660,10 +660,6 @@ threshold_quantile_find_apply(struct noisechiselparams *p)
         }
     }
 
-  /* A small sanity check. */
-  nblank=gal_blank_number(qprm.erode_th, 1);
-  if( nblank > qprm.erode_th->size-cp->interpnumngb )
-    threshold_good_error(qprm.erode_th->size-nblank, 0, cp->interpnumngb);
 
   /* Remove outliers if requested. */
   if(p->outliersigma!=0.0)
diff --git a/bin/warp/ui.c b/bin/warp/ui.c
index ce1d1a5..2a95287 100644
--- a/bin/warp/ui.c
+++ b/bin/warp/ui.c
@@ -224,8 +224,11 @@ ui_add_to_modular_warps_ll(struct argp_option *option, 
char *arg,
   gal_data_t *new;
   struct warpparams *p=(struct warpparams *)params;
 
-  /* Make sure we actually have a string to parse. */
-  if(*arg=='\0')
+  /* When an argument is necessary (note that `--align' doesn't take an
+     argument), make sure we actually have a string to parse. Also note
+     that if an argument is necessary, but none is given Argp will
+     automatically abort the program with an informative error. */
+  if(arg && *arg=='\0')
     error(EXIT_FAILURE, 0, "empty string given to `--%s'", option->name);
 
   /* Parse the (possible) arguments. */
diff --git a/configure.ac b/configure.ac
index 7f1ee16..cd8f11f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -340,7 +340,6 @@ has_gsl=yes
 has_libgit2=1
 has_cmath=yes
 has_wcslib=yes
-ldlibpathnew=""
 has_cfitsio=yes
 has_libtiff=yes
 has_libjpeg=yes
@@ -348,18 +347,21 @@ has_gslcblas=yes
 missing_mandatory=no
 missing_optional_lib=no
 
+# Keep the original LIBS to re-set in the end.
+orig_LIBS="$LIBS"
+
 # Order is important here.
 AC_LIB_HAVE_LINKFLAGS([m], [], [#include <math.h>])
 AS_IF([test "x$LIBM" = x],
       [missing_mandatory=yes; has_cmath=no],
-      [LIBS="$LIBM $LIBS"])
+      [LDADD="$LIBM $LDADD"; LIBS="$LIBM $LIBS"])
 
 AC_LIB_HAVE_LINKFLAGS([gsl], [gslcblas], [
 #include <gsl/gsl_rng.h>
 void junk(void) { gsl_rng_env_setup(); } ])
 AS_IF([test "x$LIBGSL" = x],
       [missing_mandatory=yes; has_gsl=no; has_gslcblas=no],
-      [GAL_LIBCHECK([$LIBGSL], [libgsl], [-lgsl -lgslcblas])])
+      [LDADD="$LTLIBGSL $LDADD"; LIBS="$LIBGSL $LIBS"])
 
 
 # Since version 0.42, if `libcurl' is installed, CFITSIO will link with it
@@ -377,14 +379,19 @@ AS_IF([test "x$LIBGSL" = x],
 # these extra libraries here.
 AC_LIB_HAVE_LINKFLAGS([z], [], [#include <zlib.h>])
 AS_IF([test "x$LIBZ" = x], [],
-      [GAL_LIBCHECK([$LIBZ], [libz], [-lz])])
+      [LDADD="$LTLIBZ $LDADD"; LIBS="$LIBZ $LIBS"])
 
 
 AC_LIB_HAVE_LINKFLAGS([curl], [], [#include <curl/curl.h>])
 AS_IF([test "x$LIBCURL" = x], [],
-      [GAL_LIBCHECK([$LIBCURL], [libcurl], [-lcurl])])
+      [LDADD="$LTLIBCURL $LDADD"; LIBS="$LIBCURL $LIBS"])
 
 
+# Older versions of CFITSIO don't install a shared library, only a static
+# one. Even though we add `-lm' to LDADD, on some systems, it complains
+# about not finding basic math libraries. Therefore the configure script
+# can't find CFITSIO, even though it exists. The solution is to manually add
+# the math-library as a dependency of CFITSIO.
 AC_LIB_HAVE_LINKFLAGS([cfitsio], [], [
 #include <fitsio.h>
 void junk(void) {
@@ -393,7 +400,7 @@ fitsfile *f;
 ffopen(&f, "junk", READONLY, &status);} ])
 AS_IF([test "x$LIBCFITSIO" = x],
       [missing_mandatory=yes; has_cfitsio=no],
-      [GAL_LIBCHECK([$LIBCFITSIO], [libcfitsio], [-lcfitsio])])
+      [LDADD="$LTLIBCFITSIO $LDADD"; LIBS="$LIBCFITSIO $LIBS"])
 
 
 AC_LIB_HAVE_LINKFLAGS([wcs], [], [
@@ -406,7 +413,7 @@ wcspih(header, 1, 0, 0, &nreject, &nwcs, &wcs);
 } ])
 AS_IF([test "x$LIBWCS" = x],
       [missing_mandatory=yes; has_wcslib=no],
-      [GAL_LIBCHECK([$LIBWCS], [libwcs], [-lwcs])])
+      [LDADD="$LTLIBWCS $LDADD"; LIBS="$LIBWCS $LIBS"])
 
 
 AC_LIB_HAVE_LINKFLAGS([jpeg], [], [
@@ -418,11 +425,8 @@ void junk(void) {
   jpeg_create_decompress(&cinfo);
 }  ])
 AS_IF([test "x$LIBJPEG" = x],
-      [missing_optional_lib=yes; has_libjpeg=no],
-      [GAL_LIBCHECK([$LIBJPEG], [libjpeg], [-ljpeg])])
-AS_IF([test "x$has_libjpeg" = "xyes"],
-      [AC_DEFINE([HAVE_LIBJPEG], [], [Has libjpeg])],
-      [anywarnings=yes])
+      [missing_optional_lib=yes; has_libjpeg=no; anywarnings=yes],
+      [LDADD="$LTLIBJPEG $LDADD"; LIBS="$LIBJPEG $LIBS"])
 AM_CONDITIONAL([COND_HASLIBJPEG], [test "x$has_libjpeg" = "xyes"])
 
 
@@ -433,18 +437,15 @@ AM_CONDITIONAL([COND_HASLIBJPEG], [test "x$has_libjpeg" = 
"xyes"])
 # don't need to stop the build if this fails.
 AC_LIB_HAVE_LINKFLAGS([lzma], [], [#include <lzma.h>])
 AS_IF([test "x$LIBLZMA" = x], [],
-      [GAL_LIBCHECK([$LIBLZMA], [liblzma], [-llzma])])
+      [LDADD="$LTLIBLZMA $LDADD"; LIBS="$LIBLZMA $LIBS"])
 
 AC_LIB_HAVE_LINKFLAGS([tiff], [], [
 #include <tiffio.h>
 void junk(void) {TIFF *tif=TIFFOpen("junk", "r");}
 ])
 AS_IF([test "x$LIBTIFF" = x],
-      [missing_optional_lib=yes; has_libtiff=no],
-      [GAL_LIBCHECK([$LIBTIFF], [libtiff], [-ltiff])])
-AS_IF([test "x$has_libtiff" = "xyes"],
-      [AC_DEFINE([HAVE_LIBTIFF], [], [Has libtiff])],
-      [anywarnings=yes])
+      [missing_optional_lib=yes; has_libtiff=no; anywarnings=yes],
+      [LDADD="$LTLIBTIFF $LDADD"; LIBS="$LIBTIFF $LIBS"])
 AM_CONDITIONAL([COND_HASLIBTIFF], [test "x$has_libtiff" = "xyes"])
 
 
@@ -456,11 +457,10 @@ void junk(void) {git_libgit2_init();}
 ])
 AS_IF([test "x$LIBGIT2" = x],
       [missing_optional_lib=yes; has_libgit2=0],
-      [GAL_LIBCHECK([$LIBGIT2], [libgit2], [-lgit2])])
+      [LDADD="$LTLIBGIT2 $LDADD"; LIBS="$LIBGIT2 $LIBS"])
 AC_DEFINE_UNQUOTED([GAL_CONFIG_HAVE_LIBGIT2], [$has_libgit2],
                    [libgit2 is installed on the system])
 AS_IF([test "x$has_libgit2" = "x1"], [], [anywarnings=yes])
-AC_SUBST(HAVE_LIBGIT2, [$has_libgit2])
 
 
 
@@ -471,8 +471,8 @@ AC_SUBST(HAVE_LIBGIT2, [$has_libgit2])
 #
 # Once we know that a library exsits, we need to check if it has some
 # features or not. This must be done _after_ checking the existance of
-# _all_ the libraries, because they may add elements to `LIBS' that causes
-# possibly different versions of the libraries to be read.
+# _all_ the libraries, because they may add elements to `LIBS'/`LDADD' that
+# causes possibly different versions of the libraries to be read.
 
 # GSL's `gsl_interp_steffen' isn't a function. So we'll need to use
 # `AC_LINK_IFELSE'. However, AC_LINK_IFELSE doesn't use `LDADD', so we'll
@@ -940,6 +940,17 @@ AM_CONDITIONAL([COND_WARP],        [test $enable_warp = 
yes])
 
 
 
+# Reset LIBS to the initial value BEFORE generating the Makefiles (so the
+# modified value doesn't get written into them). Then report the final
+# linking flags and put them in the Makefiles.
+LIBS="$orig_LIBS"
+AC_SUBST(CONFIG_LDADD, [$LDADD])
+AS_ECHO(["linking flags (LDADD) ... $LDADD"])
+
+
+
+
+
 # Tell autoconf what to work on: TEMPLATE cannot be put and then
 # commented here like the cases above, so don't forget to add your new
 # utility name here.
@@ -997,22 +1008,6 @@ AC_CONFIG_COMMANDS([man page directory], [$MKDIR_P 
doc/man])
 
 
 
-# If it was necessary to add run-time linking directories do it. The
-# `ldlibpathnew' variable keeps the directories that have been added to
-# `LD_LIBRARY_PATH' (to inform the user in a message after
-# installation). The updated `LD_LIBRARY_PATH' also has to be sent to the
-# Makefiles for checking specially (when the programs are run).
-AS_IF([test "x$ldlibpathnew" = x], [],
-      [
-        anywarnings=yes
-        AC_SUBST(ldlibpathnew,    [$ldlibpathnew])
-        AC_SUBST(LD_LIBRARY_PATH, [$LD_LIBRARY_PATH])
-      ])
-
-
-
-
-
 # Prepare the Makefiles.
 AC_OUTPUT
 
@@ -1104,7 +1099,10 @@ AS_IF([test x$enable_guide_message = xyes],
                AS_ECHO(["  the mandatory and optional dependencies in one 
command. See the link"])
                AS_ECHO(["  below:"])
                AS_ECHO(["    
https://www.gnu.org/s/gnuastro/manual/html_node/Dependencies-from-package-managers.html";])
-               AS_ECHO([])])
+               AS_ECHO([])
+               AS_ECHO(["  All checks related to the warning(s) above will be 
skipped."])
+               AS_ECHO([])
+              ])
 
         # Notice about PATH: The last two scenarios described below are
         # taken from
@@ -1125,26 +1123,21 @@ AS_IF([test x$enable_guide_message = xyes],
                AS_ECHO([]) ])
 
         # Notice about run-time linking.
-        AS_IF([test "x$ldlibpathnew" = x], [],
+        AS_IF([test "x$nldpath" = x], [],
               [AS_ECHO(["  - After installation, to run Gnuastro's programs, 
your run-time"])
                AS_ECHO(["    link path (LD_LIBRARY_PATH) needs to contain the 
following "])
                AS_ECHO(["    directory(s):"])
-               AS_ECHO(["        $ldlibpathnew"])
+               AS_ECHO(["        $nldpath"])
                AS_ECHO(["    If there is more than one directory, they are 
separated with a"])
                AS_ECHO(["    colon (':'). You can check the current value 
with:"])
                AS_ECHO(["        echo \$LD_LIBRARY_PATH"])
                AS_ECHO(["    If not present, add this line in your shell's 
startup script"])
                AS_ECHO(["    (for example '~/.bashrc'):"])
-               AS_ECHO(["        export 
LD_LIBRARY_PATH=\"\$LD_LIBRARY_PATH:$ldlibpathnew\""])
+               AS_ECHO(["        export 
LD_LIBRARY_PATH=\"\$LD_LIBRARY_PATH:$nldpath\""])
                AS_ECHO(["    This worning won't cause any problems during the 
rest of Gnuastro's"])
                AS_ECHO(["    build and installation. But you'll need it later, 
when you are using"])
                AS_ECHO(["    Gnuastro."])
                AS_ECHO([]) ])
-
-        # Inform the user on skipped tests.
-        AS_IF([test "x$dependency_notice" = "xyes"],
-              [AS_ECHO(["  All checks related to the warning(s) above will be 
skipped."])
-               AS_ECHO([]) ])
       ]
      )
   AS_ECHO(["To build Gnuastro $PACKAGE_VERSION, please run:"])
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 6cdeef6..3082fb4 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -144,7 +144,8 @@ dist_man_MANS = $(MAYBE_ARITHMETIC_MAN) 
$(MAYBE_BUILDPROG_MAN)          \
 ## want to overwhelm the user with any commands, so we just let them know
 ## that the distributed man pages will be used.
 if COND_HASHELP2MAN
-  MAYBE_HELP2MAN = help2man --output=$@ --source="$(PACKAGE_STRING)"
+  MAYBE_HELP2MAN = help2man --no-discard-stderr --output=$@ \
+                   --source="$(PACKAGE_STRING)"
 else
   MAYBE_HELP2MAN = @echo "Using distributed man page for"
 endif
diff --git a/doc/announce-acknowledge.txt b/doc/announce-acknowledge.txt
index a83dd5d..241dcec 100644
--- a/doc/announce-acknowledge.txt
+++ b/doc/announce-acknowledge.txt
@@ -1,12 +1,14 @@
 Alphabetically ordered list to acknowledge in the next release.
 
 Hamed Altafi
+Roberto Baena Gallé
 Zahra Bagheri
 Leindert Boogaard
 Bruno Haible
 Raul Infante-Sainz
 Lee Kelvin
 Elham Saremi
+Zahra Sharbaf
 David Valls-Gabaud
 Michael Wilkinson
 
diff --git a/doc/gnuastro.texi b/doc/gnuastro.texi
index 30b36c4..cdba66c 100644
--- a/doc/gnuastro.texi
+++ b/doc/gnuastro.texi
@@ -241,9 +241,37 @@ New to GNU/Linux?
 Tutorials
 
 * Sufi simulates a detection::  Simulating a detection.
-* General program usage tutorial::  Usage of all programs in a good way.
-* Detecting large extended targets::  Using NoiseChisel for huge extended 
targets.
-* Hubble visually checks and classifies his catalog::  Visual checks on a 
catalog.
+* General program usage tutorial::  Tutorial on many programs in generic 
scenario.
+* Detecting large extended targets::  NoiseChisel for huge extended targets.
+
+Sufi simulates a detection
+
+* General program usage tutorial::
+
+General program usage tutorial
+
+* Calling Gnuastro's programs::  Easy way to find Gnuastro's programs.
+* Accessing documentation::     Access to manual of programs you are running.
+* Setup and data download::     Setup this template and download datasets.
+* Dataset inspection and cropping::  Crop the flat region to use in next steps.
+* Angular coverage on the sky::  Measure the field size on the sky.
+* Cosmological coverage::       Measure the field size at different redshifts.
+* Building custom programs with the library::  Easy way to build new programs.
+* Option management and configuration files::  Dealing with options and 
configuring them.
+* Warping to a new pixel grid::  Transforming/warping the dataset.
+* Multiextension FITS files NoiseChisel's output::  Using extensions in FITS 
files.
+* NoiseChisel optimization for detection::  Check NoiseChisel's operation and 
improve it.
+* NoiseChisel optimization for storage::  Dramatically decrease output's 
volume.
+* Segmentation and making a catalog::  Finding true peaks and creating a 
catalog.
+* Working with catalogs estimating colors::  Estimating colors using the 
catalogs.
+* Aperture photometry::         Doing photometry on a fixed aperture.
+* Finding reddest clumps and visual inspection::  Selecting some targets and 
inspecting them.
+* Citing and acknowledging Gnuastro::  How to cite and acknowledge Gnuastro in 
your papers.
+
+Detecting large extended targets
+
+* NoiseChisel optimization::    Optimize NoiseChisel to dig very deep.
+* Achieved surface brightness level::  Measure how much you detected.
 
 Installation
 
@@ -1810,14 +1838,13 @@ command-line environment) effectively for your research.
 
 In @ref{Sufi simulates a detection}, we'll start with a
 fictional@footnote{The two historically motivated tutorials (@ref{Sufi
-simulates a detection} and @ref{Hubble visually checks and classifies his
-catalog}) are not intended to be a historical reference (the historical
-facts of this fictional tutorial used Wikipedia as a reference). This form
-of presenting a tutorial was influenced by the PGF/TikZ and Beamer
-manuals. They are both packages in in @TeX{} and @LaTeX{}, the first is a
-high-level vector graphic programming environment, while with the second
-you can make presentation slides. On a similar topic, there are also some
-nice words of wisdom for Unix-like systems called
+simulates a detection} is not intended to be a historical reference (the
+historical facts of this fictional tutorial used Wikipedia as a
+reference). This form of presenting a tutorial was influenced by the
+PGF/TikZ and Beamer manuals. They are both packages in @TeX{} and
+@LaTeX{}, the first is a high-level vector graphic programming environment,
+while with the second you can make presentation slides. On a similar topic,
+there are also some nice words of wisdom for Unix-like systems called
 @url{http://catb.org/esr/writings/unix-koans, Rootless Root}. These also
 have a similar style but they use a mythical figure named Master Foo. If
 you already have some experience in Unix-like systems, you will definitely
@@ -1850,7 +1877,7 @@ and can only try one of the tutorials, we recommend this 
one.
 @ref{Detecting large extended targets} deals with a major problem in
 astronomy: effectively detecting the faint outer wings of bright (and
 large) nearby galaxies to extremely low surface brightness levels (roughly
-1/20th of the local noise level in the example discussed). Besides the
+one quarter of the local noise level in the example discussed). Besides the
 interesting scientific questions in these low-surface brightness features,
 failure to properly detect them will bias the measurements of the
 background objects and the survey's noise estimates. This is an important
@@ -1859,12 +1886,6 @@ stars@footnote{Stars also have similarly large and 
extended wings due to
 the point spread function, see @ref{PSF}.}, cover a significant fraction of
 the survey area.
 
-Finally, in @ref{Hubble visually checks and classifies his catalog}, we go
-into the historical/fictional world again to see how Hubble could have used
-Gnuastro's programs to visually check and classify a sample of galaxies
-which ultimately lead him to the ``Hubble fork'' classification of galaxy
-morphologies.
-
 In these tutorials, we have intentionally avoided too many cross references
 to make it more easy to read. For more information about a particular
 program, you can visit the section with the same name as the program in
@@ -1878,9 +1899,8 @@ use in the example codes through the book, please see 
@ref{Conventions}.
 
 @menu
 * Sufi simulates a detection::  Simulating a detection.
-* General program usage tutorial::  Usage of all programs in a good way.
-* Detecting large extended targets::  Using NoiseChisel for huge extended 
targets.
-* Hubble visually checks and classifies his catalog::  Visual checks on a 
catalog.
+* General program usage tutorial::  Tutorial on many programs in generic 
scenario.
+* Detecting large extended targets::  NoiseChisel for huge extended targets.
 @end menu
 
 
@@ -2014,15 +2034,18 @@ parameters and sets the radius column (@code{rcol} 
above, fifth column) to
 @code{5.000}, he also chooses a Moffat function for its functional
 form. Remembering how diffuse the nebula in the Andromeda constellation
 was, he decides to simulate it with a mock S@'{e}rsic index 1.0 profile. He
-wants the output to be 500 pixels by 500 pixels, so he puts the mock
-profile in the center. Looking at his drawings of it, he decides a
-reasonable effective radius for it would be 40 pixels on this image pixel
-scale, he sets the axis ratio and position angle to approximately correct
-values too and finally he sets the total magnitude of the profile to 3.44
-which he had accurately measured. Sufi also decides to truncate both the
-mock profile and PSF at 5 times the respective radius parameters. In the
-end he decides to put four stars on the four corners of the image at very
-low magnitudes as a visual scale.
+wants the output to be 499 pixels by 499 pixels, so he can put the center
+of the mock profile in the central pixel of the image (note that an even
+number doesn't have a central element).
+
+Looking at his drawings of it, he decides a reasonable effective radius for
+it would be 40 pixels on this image pixel scale, he sets the axis ratio and
+position angle to approximately correct values too and finally he sets the
+total magnitude of the profile to 3.44 which he had accurately
+measured. Sufi also decides to truncate both the mock profile and PSF at 5
+times the respective radius parameters. In the end he decides to put four
+stars on the four corners of the image at very low magnitudes as a visual
+scale.
 
 Using all the information above, he creates the catalog of mock profiles he
 wants in a file named @file{cat.txt} (short for catalog) using his favorite
@@ -2046,10 +2069,10 @@ $ cat cat.txt
 # Column 4: PROFILE_NAME [,str7] Radial profile's functional name
  1  0.0000   0.0000  moffat  5.000  4.765  0.0000  1.000  30.000  5.000
  2  250.00   250.00  sersic  40.00  1.000  -25.00  0.400  3.4400  5.000
- 3  50.000   50.000  point   0.000  0.000  0.0000  0.000  9.0000  0.000
- 4  450.00   50.000  point   0.000  0.000  0.0000  0.000  9.2500  0.000
- 5  50.000   450.00  point   0.000  0.000  0.0000  0.000  9.5000  0.000
- 6  450.00   450.00  point   0.000  0.000  0.0000  0.000  9.7500  0.000
+ 3  50.000   50.000  point   0.000  0.000  0.0000  0.000  6.0000  0.000
+ 4  450.00   50.000  point   0.000  0.000  0.0000  0.000  6.5000  0.000
+ 5  50.000   450.00  point   0.000  0.000  0.0000  0.000  7.0000  0.000
+ 6  450.00   450.00  point   0.000  0.000  0.0000  0.000  7.5000  0.000
 @end example
 
 @noindent
@@ -2058,7 +2081,7 @@ necessary parameters and runs MakeProfiles with the 
following command:
 
 @example
 
-$ astmkprof --prepforconv --mergedsize=500,500 --zeropoint=18.0 cat.txt
+$ astmkprof --prepforconv --mergedsize=499,499 --zeropoint=18.0 cat.txt
 MakeProfiles started on Sat Oct  6 16:26:56 953
   - 6 profiles read from cat.txt
   - Random number generator (RNG) type: mt19937
@@ -2089,11 +2112,18 @@ as we see when we image the sky at night. So Sufi 
explained to him that the
 stars will take the shape of the PSF after convolution and this is how they
 would look if we didn't have an atmosphere or an aperture when we took the
 image. The size of the image was also surprising for the student, instead
-of 500 by 500, it was 2630 by 2630 pixels. So Sufi had to explain why
-oversampling is important for parts of the image where the flux change is
-significant over a pixel. Sufi then explained to him that after convolving
-we will re-sample the image to get our originally desired size. To convolve
-the image, Sufi ran the following command:
+of 499 by 499, it was 2615 by 2615 pixels (from the command below):
+
+@example
+$ astfits cat.fits -h1 | grep NAXIS
+@end example
+
+@noindent
+So Sufi explained why oversampling is important for parts of the image
+where the flux change is significant over a pixel. Sufi then explained to
+him that after convolving we will re-sample the image to get our originally
+desired size/resolution. To convolve the image, Sufi ran the following
+command:
 
 @example
 $ astconvolve --kernel=0_cat.fits cat.fits
@@ -2139,25 +2169,35 @@ cat_convolved.fits  cat.fits
 
 $ astfits -p cat_convolved_scaled.fits | grep NAXIS
 NAXIS   =                    2 / number of data axes
-NAXIS1  =                  526 / length of data axis 1
-NAXIS2  =                  526 / length of data axis 2
+NAXIS1  =                  523 / length of data axis 1
+NAXIS2  =                  523 / length of data axis 2
 @end example
 
 @noindent
 @file{cat_convolved_scaled.fits} now has the correct pixel scale. However,
-the image is still larger than what we had wanted, it is 526
-(@mymath{500+13+13}) by 526 pixels. The student is slightly confused, so
-Sufi also re-samples the PSF with the same scale and shows him that it is
-27 (@mymath{2\times13+1}) by 27 pixels. Sufi goes on to explain how
+the image is still larger than what we had wanted, it is 523
+(@mymath{499+12+12}) by 523 pixels. The student is slightly confused, so
+Sufi also re-samples the PSF with the same scale by running
+
+@example
+$ astwarp --scale=1/5 --centeroncorner 0_cat.fits
+$ astfits -p 0_cat_scaled.fits | grep NAXIS
+NAXIS   =                    2 / number of data axes
+NAXIS1  =                   25 / length of data axis 1
+NAXIS2  =                   25 / length of data axis 2
+@end example
+
+@noindent
+Sufi notes that @mymath{25=(2\times12)+1} and goes on to explain how
 frequency space convolution will dim the edges and that is why he added the
 @option{--prepforconv} option to MakeProfiles, see @ref{If convolving
 afterwards}. Now that convolution is done, Sufi can remove those extra
 pixels using Crop with the command below. Crop's @option{--section} option
 accepts coordinates inclusively and counting from 1 (according to the FITS
-standard), so the crop's first pixel has to be 14, not 13.
+standard), so the crop region's first pixel has to be 13, not 12.
 
 @example
-$ astcrop cat_convolved_scaled.fits --section=14:*-13,14:*-13    \
+$ astcrop cat_convolved_scaled.fits --section=13:*-12,13:*-12    \
           --mode=img --zeroisnotblank
 Crop started on Sat Oct  6 17:03:24 953
   - Read metadata of 1 image.                          0.001304 seconds
@@ -2170,21 +2210,25 @@ cat_convolved.fits  cat_convolved_scaled.fits          
cat.txt
 @end example
 
 @noindent
-Finally, @file{cat_convolved_scaled_cropped.fits} has the same dimensions
-as Sufi had desired in the beginning. All this trouble was certainly worth
-it because now there is no dimming on the edges of the image and the
-profile centers are more accurately sampled. The final step to simulate a
-real observation would be to add noise to the image. Sufi set the zeropoint
-magnitude to the same value that he set when making the mock profiles and
-looking again at his observation log, he had measured the background flux
-near the nebula had a magnitude of 7 that night. So using these values he
-ran MakeNoise:
+Finally, @file{cat_convolved_scaled_cropped.fits} is @mymath{499\times499}
+pixels and the mock Andromeda galaxy is centered on the central pixel (open
+the image in a FITS viewer and confirm this by zooming into the center,
+note that an even-width image wouldn't have a central pixel). This is the
+same dimensions as Sufi had desired in the beginning. All this trouble was
+certainly worth it because now there is no dimming on the edges of the
+image and the profile centers are more accurately sampled.
+
+The final step to simulate a real observation would be to add noise to the
+image. Sufi set the zeropoint magnitude to the same value that he set when
+making the mock profiles and looking again at his observation log, he had
+measured the background flux near the nebula had a magnitude of 7 that
+night. So using these values he ran MakeNoise:
 
 @example
 $ astmknoise --zeropoint=18 --background=7 --output=out.fits    \
-             cat_convolved_scaled_crop.fits
+             cat_convolved_scaled_cropped.fits
 MakeNoise started on Mon Apr  6 17:05:06 953
-  - Generator type: mt19937
+  - Generator type: ranlxs1
   - Generator seed: 1428318100
 MakeNoise finished in:  0.033491 (seconds)
 
@@ -2216,15 +2260,18 @@ variables for easier customization later. Finally, 
before every command, he
 added some comments (lines starting with @key{#}) for future readability.
 
 @example
-# Basic settings:
-edge=13
+edge=12
 base=cat
 
-# Remove any existing image to avoid confusion.
-rm out.fits
+# Stop running next commands if one fails.
+set -e
+
+# Remove any (possibly) existing output (from previous runs)
+# before starting.
+rm -f out.fits
 
 # Run MakeProfiles to create an oversampled FITS image.
-astmkprof --prepforconv --mergedsize=500,500 --zeropoint=18.0   \
+astmkprof --prepforconv --mergedsize=499,499 --zeropoint=18.0 \
           "$base".txt
 
 # Convolve the created image with the kernel.
@@ -2233,19 +2280,19 @@ astconvolve --kernel=0_"$base".fits "$base".fits
 # Scale the image back to the intended resolution.
 astwarp --scale=1/5 --centeroncorner "$base"_convolved.fits
 
-# Crop the edges out (dimmed during convolution). `--section' accepts
+# Crop the edges out (dimmed during convolution). `--section' accepts
 # inclusive coordinates, so the start of the section must be
 # one pixel larger than the edge width.
 st_edge=$(( edge + 1 ))
-astcrop "$base"_convolved_scaled.fits --zeroisnotblank          \
-        --section=$st_edge:*-$edge,$st_edge:*-$edge
+astcrop "$base"_convolved_scaled.fits --zeroisnotblank \
+        --mode=img --section=$st_edge:*-$edge,$st_edge:*-$edge
 
 # Add noise to the image.
-astmknoise --zeropoint=18 --background=7 --output=out.fits      \
+astmknoise --zeropoint=18 --background=7 --output=out.fits \
            "$base"_convolved_scaled_cropped.fits
 
 # Remove all the temporary files.
-rm 0*.fits cat*.fits
+rm 0*.fits "$base"*.fits
 @end example
 
 @cindex Comments
@@ -2322,6 +2369,10 @@ catalog). It was nearly sunset and they had to begin 
preparing for the
 night's measurements on the ecliptic.
 
 
+@menu
+* General program usage tutorial::
+@end menu
+
 @node General program usage tutorial, Detecting large extended targets, Sufi 
simulates a detection, Tutorials
 @section General program usage tutorial
 
@@ -2332,11 +2383,10 @@ images is one of the most basic and common steps in 
astronomical
 analysis. Here, we will use Gnuastro's programs to get a physical scale
 (area at certain redshifts) of the field we are studying, detect objects in
 a Hubble Space Telescope (HST) image, measure their colors and identify the
-ones with the largest colors to visual inspection and their spatial
-position in the image. After this tutorial, you can also try the
-@ref{Detecting large extended targets} tutorial which goes into a little
-more detail on optimally configuring NoiseChisel (Gnuastro's detection
-tool) in special situations.
+ones with the strongest colors, do a visual inspection of these objects and
+inspect their spatial positions in the image. After this tutorial, you can also
+try the @ref{Detecting large extended targets} tutorial which goes into a
+little more detail on detecting very low surface brightness signal.
 
 During the tutorial, we will take many detours to explain, and practically
 demonstrate, the many capabilities of Gnuastro's programs. In the end you
@@ -2375,6 +2425,29 @@ commands). Don't simply copy and paste the commands 
shown here. This will
 help simulate future situations when you are processing your own datasets.
 @end cartouche
 
+
+@menu
+* Calling Gnuastro's programs::  Easy way to find Gnuastro's programs.
+* Accessing documentation::     Access to manual of programs you are running.
+* Setup and data download::     Setup this template and download datasets.
+* Dataset inspection and cropping::  Crop the flat region to use in next steps.
+* Angular coverage on the sky::  Measure the field size on the sky.
+* Cosmological coverage::       Measure the field size at different redshifts.
+* Building custom programs with the library::  Easy way to build new programs.
+* Option management and configuration files::  Dealing with options and 
configuring them.
+* Warping to a new pixel grid::  Transforming/warping the dataset.
+* Multiextension FITS files NoiseChisel's output::  Using extensions in FITS 
files.
+* NoiseChisel optimization for detection::  Check NoiseChisel's operation and 
improve it.
+* NoiseChisel optimization for storage::  Dramatically decrease output's 
volume.
+* Segmentation and making a catalog::  Finding true peaks and creating a 
catalog.
+* Working with catalogs estimating colors::  Estimating colors using the 
catalogs.
+* Aperture photometry::         Doing photometry on a fixed aperture.
+* Finding reddest clumps and visual inspection::  Selecting some targets and 
inspecting them.
+* Citing and acknowledging Gnuastro::  How to cite and acknowledge Gnuastro in 
your papers.
+@end menu
+
+@node Calling Gnuastro's programs, Accessing documentation, General program 
usage tutorial, General program usage tutorial
+@subsection Calling Gnuastro's programs
 A handy feature of Gnuastro is that all program names start with
 @code{ast}. This will allow your command-line processor to easily list and
 auto-complete Gnuastro's programs for you.  Try typing the following
@@ -2392,15 +2465,20 @@ the program name will auto-complete once your input 
characters are
 unambiguous. In short, you often don't need to type the full name of the
 program you want to run.
 
+@node Accessing documentation, Setup and data download, Calling Gnuastro's 
programs, General program usage tutorial
+@subsection Accessing documentation
+
 Gnuastro contains a large number of programs and it is natural to forget
 the details of each program's options or inputs and outputs. Therefore,
-before starting the analysis, let's review how you can access this book to
-refresh your memory any time you want. For example when working on the
-command-line, without having to take your hands off the keyboard. When you
-install Gnuastro, this book is also installed on your system along with all
-the programs and libraries, so you don't need an internet connection to to
-access/read it. Also, by accessing this book as described below, you can be
-sure that it corresponds to your installed version of Gnuastro.
+before starting the analysis steps of this tutorial, let's review how you
+can access this book to refresh your memory any time you want, without
+having to take your hands off the keyboard.
+
+When you install Gnuastro, this book is also installed on your system along
+with all the programs and libraries, so you don't need an internet
+connection to access/read it. Also, by accessing this book as described
+below, you can be sure that it corresponds to your installed version of
+Gnuastro.
 
 @cindex GNU Info
 GNU Info@footnote{GNU Info is already available on almost all Unix-like
@@ -2483,7 +2561,11 @@ $ astnoisechisel --help | grep quant
 $ astnoisechisel --help | grep check
 @end example
 
-Let's start the processing. First, to keep things clean, let's create a
+@node Setup and data download, Dataset inspection and cropping, Accessing 
documentation, General program usage tutorial
+@subsection Setup and data download
+
+The first step in the analysis of the tutorial is to download the necessary
+input datasets. First, to keep things clean, let's create a
 @file{gnuastro-tutorial} directory and continue all future steps in it:
 
 @example
@@ -2493,16 +2575,17 @@ $ cd gnuastro-tutorial
 
 We will be using the near infra-red @url{http://www.stsci.edu/hst/wfc3,
 Wide Field Camera} dataset. If you already have them in another directory
-(for example @file{XDFDIR}), you can set the @file{download} directory to
-be a symbolic link to @file{XDFDIR} with a command like this:
+(for example @file{XDFDIR}, with the same FITS file names), you can set the
+@file{download} directory to be a symbolic link to @file{XDFDIR} with a
+command like this:
 
 @example
 $ ln -s XDFDIR download
 @end example
 
 @noindent
-If the following images aren't already present on your system, you can make
-a @file{download} directory and download them there.
+Otherwise, when the following images aren't already present on your system,
+you can make a @file{download} directory and download them there.
 
 @example
 $ mkdir download
@@ -2514,10 +2597,10 @@ $ cd ..
 @end example
 
 @noindent
-In this tutorial, we'll just use these two filters. Later, you will
-probably need to download more filters, you can use the shell's @code{for}
-loop to download them all in series (one after the other@footnote{Note that
-you only have one port to the internet, so downloading in parallel will
+In this tutorial, we'll just use these two filters. Later, you may need to
+download more filters. To do that, you can use the shell's @code{for} loop
+to download them all in series (one after the other@footnote{Note that you
+only have one port to the internet, so downloading in parallel will
 actually be slower than downloading in series.}) with one command like the
 one below for the WFC3 filters. Put this command instead of the two
 @code{wget} commands above. Recall that all the extra spaces, back-slashes
@@ -2530,37 +2613,53 @@ $ for f in f105w f125w f140w f160w; do                  
            \
   done
 @end example
 
-First, let's visually inspect the dataset. Let's take F160W image as an
-example. Do the steps below with the other image(s) too (and later with any
-dataset that you want to work on). It is very important to understand your
-dataset visually. Note how ds9 doesn't follow the GNU style of options
-where ``long'' and ``short'' options are preceded by @option{--} and
-@option{-} respectively (for example @option{--width} and @option{-w}, see
-@ref{Options}).
 
-Ds9's @option{-zscale} option is a good scaling to highlight the low
-surface brightness regions, and as the name suggests, @option{-zoom to fit}
-will fit the whole dataset in the window. If the window is too small,
-expand it with your mouse, then press the ``zoom'' button on the top row of
-buttons above the image, then in the row below it, press ``zoom fit''. You
-can also zoom in and out by scrolling your mouse or the respective
-operation on your touch-pad when your cursor/pointer is over the image.
+@node Dataset inspection and cropping, Angular coverage on the sky, Setup and 
data download, General program usage tutorial
+@subsection Dataset inspection and cropping
+
+First, let's visually inspect the datasets we downloaded in @ref{Setup and
+data download}. Let's take F160W image as an example. Do the steps below
+with the other image(s) too (and later with any dataset that you want to
+work on). It is very important to get a good visual feeling of the dataset
+you intend to use. Also, note how SAO DS9 (used here for visual inspection
+of FITS images) doesn't follow the GNU style of options where ``long'' and
+``short'' options are preceded by @option{--} and @option{-} respectively
+(for example @option{--width} and @option{-w}, see @ref{Options}).
+
+Run the command below to see the F160W image with DS9. Ds9's
+@option{-zscale} scaling is good to visually highlight the low surface
+brightness regions, and as the name suggests, @option{-zoom to fit} will
+fit the whole dataset in the window. If the window is too small, expand it
+with your mouse, then press the ``zoom'' button on the top row of buttons
+above the image. Afterwards, in the bottom row of buttons, press ``zoom
+fit''. You can also zoom in and out by scrolling your mouse or the
+respective operation on your touch-pad when your cursor/pointer is over the
+image.
 
 @example
 $ ds9 download/hlsp_xdf_hst_wfc3ir-60mas_hudf_f160w_v1_sci.fits     \
       -zscale -zoom to fit
 @end example
 
-The first thing you might notice is that the regions with no data have a
-value of zero in this image. The next thing might be that the dataset
-actually has two ``depth''s (see @ref{Quantifying measurement limits}). The
-exposure time of the deep inner region is more than 4 times of the outer
-parts. Fortunately the XDF survey webpage (above) contains the vertices of
-the deep flat WFC3-IR field. With Gnuastro's Crop program@footnote{To learn
-more about the crop program see @ref{Crop}.}, you can use those vertices to
-cutout this deep infra-red region from the larger image. We'll make a
-directory called @file{flat-ir} and keep the flat infra-red regions in that
-directory (with a `@file{xdf-}' suffix for a shorter and easier filename).
+As you hover your mouse over the image, notice how the ``Value'' and
+positional fields on the top of the ds9 window get updated. The first thing
+you might notice is that when you hover the mouse over the regions with no
+data, they have a value of zero. The next thing might be that the dataset
+actually has two ``depth''s (see @ref{Quantifying measurement
+limits}). Recall that this is a combined/reduced image of many exposures,
+and the parts that have more exposures are deeper. In particular, the
+exposure time of the deep inner region is more than 4 times that of the
+outer (shallower) parts.
+
+To simplify the analysis in this tutorial, we'll only be working on the
+deep field, so let's crop it out of the full dataset. Fortunately the XDF
+survey webpage (above) contains the vertices of the deep flat WFC3-IR
+field. With Gnuastro's Crop program@footnote{To learn more about the crop
+program see @ref{Crop}.}, you can use those vertices to cutout this deep
+region from the larger image. But before that, to keep things organized,
+let's make a directory called @file{flat-ir} and keep the flat
+(single-depth) regions in that directory (with a `@file{xdf-}' suffix for a
+shorter and easier filename).
 
 @example
 $ mkdir flat-ir
@@ -2579,11 +2678,13 @@ filter name. Therefore, to simplify the command, and 
later allow work on
 more filters, we can use the shell's @code{for} loop. Notice how the two
 places where the filter names (@file{f105w} and @file{f160w}) are used
 above have been replaced with @file{$f} (the shell variable that @code{for}
-is in charge of setting) below. To generalize this for more filters later,
-you can simply add the other filter names in the first line before the
-semi-colon (@code{;}).
+will update in every loop) below. In such cases, you should generally avoid
+repeating a command manually and use loops like below. To generalize this
+for more filters later, you can simply add the other filter names in the
+first line before the semi-colon (@code{;}).
 
 @example
+$ rm flat-ir/*.fits
 $ for f in f105w f160w; do                                            \
     astcrop --mode=wcs -h0 --output=flat-ir/xdf-$f.fits               \
             --polygon="53.187414,-27.779152 : 53.159507,-27.759633 :  \
@@ -2596,8 +2697,8 @@ Please open these images and inspect them with the same 
@command{ds9}
 command you used above. You will see how it is nicely flat now and doesn't
 have varying depths. Another important result of this crop is that regions
 with no data now have a NaN (Not-a-Number, or a blank value) value, not
-zero. Zero is a number, and thus a meaningful value, especially when you
-later want to NoiseChisel@footnote{As you will see below, unlike most other
+zero. Zero is a number, and is thus meaningful, especially when you later
+want to run NoiseChisel@footnote{As you will see below, unlike most other
 detection algorithms, NoiseChisel detects the objects from their faintest
 parts, it doesn't start with their high signal-to-noise ratio peaks. Since
 the Sky is already subtracted in many images and noise fluctuates around
@@ -2606,138 +2707,170 @@ not ignoring zero-valued pixels in this image, will 
cause them to part of
 the detections!}. Generally, when you want to ignore some pixels in a
 dataset, and avoid higher-level ambiguities or complications, it is always
 best to give them blank values (not zero, or some other absurdly large or
-small number).
+small number). Gnuastro has the Arithmetic program for such cases, and
+we'll introduce it during this tutorial.
+
+@node Angular coverage on the sky, Cosmological coverage, Dataset inspection 
and cropping, General program usage tutorial
+@subsection Angular coverage on the sky
 
 @cindex @code{CDELT}
 @cindex Coordinate scales
 @cindex Scales, coordinate
 This is the deepest image we currently have of the sky. The first thing
-that comes to mind may be this: ``How large is this field?''. The FITS
-world coordinate system (WCS) meta data standard contains the key to
-answering this question: the @code{CDELT} keyword@footnote{In the FITS
-standard, the @code{CDELT} keywords (@code{CDELT1} and @code{CDELT2} in a
-2D image) specify the scales of each coordinate. In the case of this image
-it is in units of degrees-per-pixel. See Section 8 of the
-@url{https://fits.gsfc.nasa.gov/standard40/fits_standard40aa-le.pdf, FITS
-standard} for more. In short, with the @code{CDELT} convention, rotation
-(@code{PC} or @code{CD} keywords) and scales (@code{CDELT}) are
+that comes to mind may be this: ``How large is this field on the
+sky?''. The FITS world coordinate system (WCS) meta data standard contains
+the key to answering this question: the @code{CDELT} keyword@footnote{In
+the FITS standard, the @code{CDELT} keywords (@code{CDELT1} and
+@code{CDELT2} in a 2D image) specify the scales of each coordinate. In the
+case of this image it is in units of degrees-per-pixel. See Section 8 of
+the @url{https://fits.gsfc.nasa.gov/standard40/fits_standard40aa-le.pdf,
+FITS standard} for more. In short, with the @code{CDELT} convention,
+rotation (@code{PC} or @code{CD} keywords) and scales (@code{CDELT}) are
 separated. In the FITS standard the @code{CDELT} keywords are
 optional. When @code{CDELT} keywords aren't present, the @code{PC} matrix
 is assumed to contain @emph{both} the coordinate rotation and scales. Note
 that not all FITS writers use the @code{CDELT} convention. So you might not
 find the @code{CDELT} keywords in the WCS meta data of some FITS
 files. However, all Gnuastro programs (which use the default FITS keyword
-writing format of WCSLIB), the @code{CDELT} convention is used, even if the
-input doesn't have it. So when rotation and scaling are combined and
-finding the pixel scale isn't trivial from the raw keyword values, you can
-feed the dataset to any (simple) Gnuastro program (for example
-Arithmetic). The output will have the @code{CDELT} keyword.}. With the
-commands below, we'll use it (along with the image size) to find the
-answer. The lines starting with @code{##} are just comments for you to help
-in following the steps. Don't type them on the terminal. The commands are
-intentionally repetitive in some places to better understand each step and
-also to demonstrate the beauty of command-line features like variables,
-pipes and loops. Later, if you would like to repeat this process on another
-dataset, you can just use commands 3, 7, and 9.
+writing format of WCSLIB) write their output WCS with the @code{CDELT}
+convention, even if the input doesn't have it. If your dataset doesn't use
+the @code{CDELT} convention, you can feed it to any (simple) Gnuastro
+program (for example Arithmetic) and the output will have the @code{CDELT}
+keyword.}.
+
+With the commands below, we'll use @code{CDELT} (along with the image size)
+to find the answer. The lines starting with @code{##} are just comments for
+you to read and understand each command. Don't type them on the
+terminal. The commands are intentionally repetitive in some places to
+better understand each step and also to demonstrate the beauty of
+command-line features like history, variables, pipes and loops (which you
+will commonly use as you master the command-line).
 
 @cartouche
 @noindent
 @strong{Use shell history:} Don't forget to make effective use of your
-shell's history. This is especially convenient when you just want to make a
-small change to your previous command. Press the ``up'' key on your
-keyboard (possibly multiple times) to see your previous command(s).
+shell's history: you don't have to re-type previous commands to add
+something to them. This is especially convenient when you just want to make
+a small change to your previous command. Press the ``up'' key on your
+keyboard (possibly multiple times) to see your previous command(s) and
+modify them accordingly.
 @end cartouche
 
 @example
-## (1)  See the general statistics of non-blank pixel values.
+## See the general statistics of non-blank pixel values.
 $ aststatistics flat-ir/xdf-f160w.fits
 
-## (2)  We only want the number of non-blank pixels.
+## We only want the number of non-blank pixels.
 $ aststatistics flat-ir/xdf-f160w.fits --number
 
-## (3)  Keep the result of the command above in the shell variable `n'.
+## Keep the result of the command above in the shell variable `n'.
 $ n=$(aststatistics flat-ir/xdf-f160w.fits --number)
 
-## (4)  See what is stored the shell variable `n'.
+## See what is stored in the shell variable `n'.
 $ echo $n
 
-## (5)  Show all the FITS keywords of this image.
+## Show all the FITS keywords of this image.
 $ astfits flat-ir/xdf-f160w.fits -h1
 
-## (6)  The resolution (in degrees/pixel) is in the `CDELT' keywords.
-##      Only show lines that contain these characters, by feeding
-##      the output of the previous command to the `grep' program.
+## The resolution (in degrees/pixel) is in the `CDELT' keywords.
+## Only show lines that contain these characters, by feeding
+## the output of the previous command to the `grep' program.
 $ astfits flat-ir/xdf-f160w.fits -h1 | grep CDELT
 
-## (7)  Save the resolution (same in both dimensions) in the variable
-##      `r'. The last part uses AWK to print the third `field' of its
-##      input line. The first two fields were `CDELT1' and `='.
+## Since the resolution of both dimensions is (approximately) equal,
+## we'll only use one of them (CDELT1).
+$ astfits flat-ir/xdf-f160w.fits -h1 | grep CDELT1
+
+## To extract the value (third token in the line above), we'll
+## feed the output to AWK. Note that the first two tokens are
+## `CDELT1' and `='.
+$ astfits flat-ir/xdf-f160w.fits -h1 | grep CDELT1 | awk '@{print $3@}'
+
+## Save it as the shell variable `r'.
 $ r=$(astfits flat-ir/xdf-f160w.fits -h1 | grep CDELT1   \
               | awk '@{print $3@}')
 
-## (8)  Print the values of `n' and `r'.
+## Print the values of `n' and `r'.
 $ echo $n $r
 
-## (9)  Use the number of pixels (first number passed to AWK) and
-##      length of each pixel's edge (second number passed to AWK)
-##      to estimate the area of the field in arc-minutes squared.
-$ area=$(echo $n $r | awk '@{print $1 * ($2^2) * 3600@}')
+## Use the number of pixels (first number passed to AWK) and
+## length of each pixel's edge (second number passed to AWK)
+## to estimate the area of the field in arc-minutes squared.
+$ echo $n $r | awk '@{print $1 * ($2^2) * 3600@}'
 @end example
 
-The area of this field is 4.03817 (or 4.04) arc-minutes squared. Just for
-comparison, this is roughly 175 times smaller than the average moon's
-angular area (with a diameter of 30arc-minutes or half a degree).
+The output of the last command (area of this field) is 4.03817 (or
+approximately 4.04) arc-minutes squared. Just for comparison, this is
+roughly 175 times smaller than the average moon's angular area (with a
+diameter of 30arc-minutes or half a degree).
 
 @cindex GNU AWK
 @cartouche
 @noindent
-@strong{AWK for table/value processing:} AWK is a powerful and simple tool
-for text processing. Above (and further below) some simple examples are
-shown. GNU AWK (the most common implementation) comes with a free and
+@strong{AWK for table/value processing:} As you saw above AWK is a powerful
+and simple tool for text processing. You will see it often in shell
+scripts. GNU AWK (the most common implementation) comes with a free and
 wonderful @url{https://www.gnu.org/software/gawk/manual/, book} in the same
 format as this book which will allow you to master it nicely. Just like
 this manual, you can also access GNU AWK's manual on the command-line
-whenever necessary without taking your hands off the keyboard.
+whenever necessary without taking your hands off the keyboard. Just run
+@code{info awk}.
 @end cartouche
 
-This takes us to the second question that you have probably asked yourself
-when you saw the field for the first time: ``How large is this area at
-different redshifts?''. To get a feeling of the tangential area that this
-field covers at redshift 2, you can use @ref{CosmicCalculator}. In
+
+@node Cosmological coverage, Building custom programs with the library, 
Angular coverage on the sky, General program usage tutorial
+@subsection Cosmological coverage
+Having found the angular coverage of the dataset in @ref{Angular coverage
+on the sky}, we can now use Gnuastro to answer a more physically motivated
+question: ``How large is this area at different redshifts?''. To get a
+feeling of the tangential area that this field covers at redshift 2, you
+can use Gnuastro's CosmicCalculator program (@ref{CosmicCalculator}). In
 particular, you need the tangential distance covered by 1 arc-second as raw
-output. Combined with the field's area, we can then calculate the
-tangential distance in Mega Parsecs squared (@mymath{Mpc^2}).
+output. Combined with the field's area that was measured before, we can
+calculate the tangential distance in Mega Parsecs squared (@mymath{Mpc^2}).
 
 @example
-## Print general cosmological properties at redshift 2.
+## Print general cosmological properties at redshift 2 (for example).
 $ astcosmiccal -z2
 
 ## When given a "Specific calculation" option, CosmicCalculator
-## will just print that particular calculation. See the options
-## under this title in the output of `--help' for more.
-$ astcosmiccal --help
+## will just print that particular calculation. To see all such
+## calculations, add a `--help' token to the previous command
+## (under the same title). Note that with `--help', no processing
+## is done, so you can always simply append it to remember
+## something without modifying the command you want to run.
+$ astcosmiccal -z2 --help
 
 ## Only print the "Tangential dist. covered by 1arcsec at z (kpc)".
 ## in units of kpc/arc-seconds.
 $ astcosmiccal -z2 --arcsectandist
 
+## But it's easier to use the short version of this option (which
+## can be appended to other short options).
+$ astcosmiccal -sz2
+
 ## Convert this distance to kpc^2/arcmin^2 and save in `k'.
-$ k=$(astcosmiccal -z2 --arcsectandist | awk '@{print ($1*60)^2@}')
+$ k=$(astcosmiccal -sz2 | awk '@{print ($1*60)^2@}')
+
+## Re-calculate the area of the dataset in arcmin^2.
+$ n=$(aststatistics flat-ir/xdf-f160w.fits --number)
+$ r=$(astfits flat-ir/xdf-f160w.fits -h1 | grep CDELT1   \
+              | awk '@{print $3@}')
+$ a=$(echo $n $r | awk '@{print $1 * ($2^2) * 3600@}')
 
-## Multiply by the area of the field (in arcmin^2) and divide by
-## 10^6 to return value in Mpc^2.
-$ echo $k $area | awk '@{print $1 * $2 / 1e6@}'
+## Multiply `k' and `a' and divide by 10^6 for value in Mpc^2.
+$ echo $k $a | awk '@{print $1 * $2 / 1e6@}'
 @end example
 
 @noindent
-At redshift 2, this field therefore covers 1.07145 @mymath{Mpc^2}. If you
-would like to see how this tangential area changes with redshift, you can
-use a shell loop like below.
+At redshift 2, this field therefore covers approximately 1.07
+@mymath{Mpc^2}. If you would like to see how this tangential area changes
+with redshift, you can use a shell loop like below.
 
 @example
-$ for z in 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 4.5 5.0; do           \
-    k=$(astcosmiccal -z$z --arcsectandist);                      \
-    echo $z $k $area | awk '@{print $1, ($2*60)^2 * $3 / 1e6@}';   \
+$ for z in 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 4.5 5.0; do        \
+    k=$(astcosmiccal -sz$z);                                  \
+    echo $z $k $a | awk '@{print $1, ($2*60)^2 * $3 / 1e6@}';   \
   done
 @end example
 
@@ -2745,32 +2878,43 @@ $ for z in 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 4.5 5.0; do  
         \
 Fortunately, the shell has a useful tool/program to print a sequence of
 numbers that is nicely called @code{seq}. You can use it instead of typing
 all the different redshifts in this example. For example the loop below
-will print the same range of redshifts (between 0.5 and 5) but with
-increments of 0.1.
+will calculate and print the tangential coverage of this field across a
+larger range of redshifts (0.1 to 5) and with finer increments of 0.1.
 
 @example
-$ for z in $(seq 0.5 0.1 5); do                                  \
+$ for z in $(seq 0.1 0.1 5); do                                  \
     k=$(astcosmiccal -z$z --arcsectandist);                      \
     echo $z $k $area | awk '@{print $1, ($2*60)^2 * $3 / 1e6@}';   \
   done
 @end example
 
-This is a fast and simple way for this repeated calculation when it is only
-necessary once. However, if you commonly need this calculation and possibly
-for a larger number of redshifts, the command above can be slow. This is
-because the CosmicCalculator program has a lot of overhead. To be generic
-and easy to operate, it has to parse the command-line and all configuration
-files (see below) which contain human-readable characters and need a lot of
-processing to be ready for processing by the computer. Afterwards,
+
+@node Building custom programs with the library, Option management and 
configuration files, Cosmological coverage, General program usage tutorial
+@subsection Building custom programs with the library
+In @ref{Cosmological coverage}, we repeated a certain calculation/output of
+a program multiple times using the shell's @code{for} loop. This simple
+way of repeating a calculation is great when it is only necessary once. However,
+if you commonly need this calculation and possibly for a larger number of
+redshifts at higher precision, the command above can be slow (try it out to
+see).
+
+This slowness of the repeated calls to a generic program (like
+CosmicCalculator) is because it can have a lot of overhead on each
+call. To be generic and easy to operate, it has to parse the command-line
+and all configuration files (see @ref{Option management and configuration
+files}) which contain human-readable characters and need a lot of
+pre-processing to be ready for processing by the computer. Afterwards,
 CosmicCalculator has to check the sanity of its inputs and check which of
-its many options you have asked for. It has to do all of these for every
-redshift in the loop above.
+its many options you have asked for. All this pre-processing takes as
+much time as the high-level calculation you are requesting, and it has to
+re-do all of these for every redshift in your loop.
 
-To greatly speed up the processing, you can directly access the root
-work-horse of CosmicCalculator without all that overhead. Using Gnuastro's
-library, you can write your own tiny program particularly designed for this
-exact calculation (and nothing else!). To do that, copy and paste the
-following C program in a file called @file{myprogram.c}.
+To greatly speed up the processing, you can directly access the core
+work-horse of CosmicCalculator without all that overhead by designing your
+custom program for this job. Using Gnuastro's library, you can write your
+own tiny program particularly designed for this exact calculation (and
+nothing else!). To do that, copy and paste the following C program in a
+file called @file{myprogram.c}.
 
 @example
 #include <math.h>
@@ -2807,67 +2951,82 @@ main(void)
 @end example
 
 @noindent
-To greatly simplify the compilation, linking and running of simple C
-programs like this that use Gnuastro's library, Gnuastro has
-@ref{BuildProgram}. This program is designed to manage Gnuastro's
-dependencies, compile and link the program and then run the new program. To
-build, @emph{and run} the program above, use the following command:
+Then run the following command to compile your program and run it.
 
 @example
 $ astbuildprog myprogram.c
 @end example
 
-Did you notice how much faster this was compared to the shell loop we wrote
-above? You might have noticed that a new file called @file{myprogram} is
-also created in the directory. This is the compiled program that was
-created and run by the command above (its in binary machine code format,
-not human-readable any more). You can run it again to get the same results
-with a command like this:
+@noindent
+In the command above, you used Gnuastro's BuildProgram program. Its job is
+to greatly simplify the compilation, linking and running of simple C
+programs that use Gnuastro's library (like this one). BuildProgram is
+designed to manage Gnuastro's dependencies, compile and link your custom
+program and then run it.
+
+Did you notice how your custom program was much faster than the repeated
+calls to CosmicCalculator in the previous section? You might have noticed
+that a new file called @file{myprogram} is also created in the
+directory. This is the compiled program that was created and run by the
+command above (it's in binary machine code format, not human-readable any
+more). You can run it again to get the same results with a command like
+this:
 
 @example
 $ ./myprogram
 @end example
 
-The efficiency of @file{myprogram} compared to CosmicCalculator is because
-in the latter, the requested processing is comparable to the necessary
-overheads. For other programs that take large input datasets and do
-complicated processing on them, the overhead is usually negligible compared
-to the processing. In such cases, the libraries are only useful if you want
-a different/new processing compared to the functionalities in Gnuastro's
-existing programs.
-
-Gnuastro has a large library which is heavily used by all the programs. In
-other words, the library is like the skeleton of Gnuastro. For the full
-list of available functions classified by context, please see @ref{Gnuastro
-library}. Gnuastro's library and BuildProgram are created to make it easy
-for you to use these powerful features as you like. This gives you a high
-level of creativity, while also providing efficiency and
+The efficiency of your custom @file{myprogram} compared to repeated calls
+to CosmicCalculator is because in the latter, the requested processing is
+comparable to the necessary overheads. For other programs that take large
+input datasets and do complicated processing on them, the overhead is
+usually negligible compared to the processing. In such cases, the libraries
+are only useful if you want a different/new processing compared to the
+functionalities in Gnuastro's existing programs.
+
+Gnuastro has a large library which is used extensively by all the
+programs. In other words, the library is like the skeleton of Gnuastro. For
+the full list of available functions classified by context, please see
+@ref{Gnuastro library}. Gnuastro's library and BuildProgram are created to
+make it easy for you to use these powerful features as you like. This gives
+you a high level of creativity, while also providing efficiency and
 robustness. Several other complete working examples (involving images and
 tables) of Gnuastro's libraries can be see in @ref{Library demo
-programs}. Let's stop the discussion on libraries at this point in this
-tutorial and get back to Gnuastro's already built programs which were the
-main purpose of this tutorial.
+programs}.
 
+But for this tutorial, let's stop discussing the libraries at this point
+and get back to Gnuastro's already built programs which don't need any
+programming. But before continuing, let's clean up the files we don't need
+any more:
+
+@example
+$ rm myprogram*
+@end example
+
+
+@node Option management and configuration files, Warping to a new pixel grid, 
Building custom programs with the library, General program usage tutorial
+@subsection Option management and configuration files
 None of Gnuastro's programs keep a default value internally within their
-code. However, when you ran CosmicCalculator with the @option{-z2} option
-above, it completed its processing and printed results. So where did the
-``default'' cosmological parameter values (like the matter density and etc)
-come from?  The values come from the command-line or a configuration file
-(see @ref{Configuration file precedence}).
-
-CosmicCalculator has a small set of parameters/options. Therefore, let's
-use it to discuss configuration files (see @ref{Configuration
+code. However, when you ran CosmicCalculator only with the @option{-z2}
+option (not specifying the cosmological parameters) in @ref{Cosmological
+coverage}, it completed its processing and printed results. Where did the
+necessary cosmological parameters (like the matter density and etc) that
+are necessary for its calculations come from? Fast reply: the values come
+from a configuration file (see @ref{Configuration file precedence}).
+
+CosmicCalculator is a small program with a limited set of
+parameters/options. Therefore, let's use it to discuss configuration files
+in Gnuastro (for more, you can always see @ref{Configuration
 files}). Configuration files are an important part of all Gnuastro's
 programs, especially the ones with a large number of options, so its
 important to understand this part well .
 
-Once you get comfortable with configuration files, you can easily do the
-same for the options of all Gnuastro programs (for example,
-NoiseChisel). Therefore configuration files will be useful for it when you
-use different datasets (with different noise properties or in different
-research contexts). The configuration of each program (besides its version)
-is vital for the reproducibility of your results, so it is important to
-manage them properly.
+Once you get comfortable with configuration files here, you can make good
+use of them in all Gnuastro programs (for example, NoiseChisel). For
+example, to do optimal detection on various datasets, you can have
+configuration files for different noise properties. The configuration of
+each program (besides its version) is vital for the reproducibility of your
+results, so it is important to manage them properly.
 
 As we saw above, the full list of the options in all Gnuastro programs can
 be seen with the @option{--help} option. Try calling it with
@@ -2882,7 +3041,7 @@ $ astcosmiccal --help
 @noindent
 The options that need a value have an @key{=} sign after their long version
 and @code{FLT}, @code{INT} or @code{STR} for floating point numbers,
-integer numbers and strings (filenames for example) respectively. All
+integer numbers, and strings (filenames for example) respectively. All
 options have a long format and some have a short format (a single
 character), for more see @ref{Options}.
 
@@ -2890,28 +3049,36 @@ When you are using a program, it is often necessary to 
check the value the
 option has just before the program starts its processing. In other words,
 after it has parsed the command-line options and all configuration
 files. You can see the values of all options that need one with the
-@option{--printparams} or @code{-P} option that is common to all programs
-(see @ref{Common options}). In the command below, try replacing @code{-P}
-with @option{--printparams} to see how both do the same operation.
+@option{--printparams} or @code{-P} option. @option{--printparams} is
+common to all programs (see @ref{Common options}). In the command below,
+try replacing @code{-P} with @option{--printparams} to see how both do the
+same operation.
 
 @example
 $ astcosmiccal -P
 @end example
 
 Let's say you want a different Hubble constant. Try running the following
-command to see how the Hubble constant in the output of the command above
-has changed. Afterwards, delete the @option{-P} and add a @option{-z2} to
-see the results with the new cosmology (or configuration).
+command (just adding @option{--H0=70} after the command above) to see how
+the Hubble constant in the output of the command above has changed.
 
 @example
 $ astcosmiccal -P --H0=70
 @end example
 
+@noindent
+Afterwards, delete the @option{-P} and add a @option{-z2} to see the
+calculations with the new cosmology (or configuration).
+
+@example
+$ astcosmiccal --H0=70 -z2
+@end example
+
 From the output of the @code{--help} option, note how the option for Hubble
 constant has both short (@code{-H}) and long (@code{--H0}) formats. One
 final note is that the equal (@key{=}) sign is not mandatory. In the short
 format, the value can stick to the actual option (the short option name is
-just one character after-all and thus easily identifiable) and in the long
+just one character after-all, thus easily identifiable) and in the long
 format, a white-space character is also enough.
 
 @example
@@ -2919,6 +3086,15 @@ $ astcosmiccal -H70    -z2
 $ astcosmiccal --H0 70 -z2 --arcsectandist
 @end example
 
+@noindent
+When an option doesn't need a value, and has a short format (like
+@option{--arcsectandist}), you can easily append it @emph{before} other
+short options. So the last command above can also be written as:
+
+@example
+$ astcosmiccal --H0 70 -sz2
+@end example
+
 Let's assume that in one project, you want to only use rounded cosmological
 parameters (H0 of 70km/s/Mpc and matter density of 0.3). You should
 therefore run CosmicCalculator like this:
@@ -2928,17 +3104,18 @@ $ astcosmiccal --H0=70 --olambda=0.7 --omatter=0.3 -z2
 @end example
 
 But having to type these extra options every time you run CosmicCalculator
-will be prone to errors (typos in particular) and also will be frustrating
-and slow. Therefore in Gnuastro, you can put all the options and their
-values in a ``Configuration file'' and tell the programs to read the option
-values from there.
+will be prone to errors (typos in particular), frustrating and
+slow. Therefore in Gnuastro, you can put all the options and their values
+in a ``Configuration file'' and tell the programs to read the option values
+from there.
 
-Let's create a configuration file. In your favorite text editor, make a
+Let's create a configuration file. With your favorite text editor, make a
 file named @file{my-cosmology.conf} (or @file{my-cosmology.txt}, the suffix
-doesn't matter) which contains the following lines. One space between the
-option value and name is enough, the values are just under each other to
-help in readability. Also note that you can only use long option names in
-configuration files.
+doesn't matter, but a more descriptive suffix like @file{.conf} is
+recommended). Then put the following lines inside of it. One space between
+the option value and name is enough, the values are just under each other
+to help in readability. Also note that you can only use long option names
+in configuration files.
 
 @example
 H0       70
@@ -2950,18 +3127,28 @@ omatter  0.3
 You can now tell CosmicCalculator to read this file for option values
 immediately using the @option{--config} option as shown below. Do you see
 how the output of the following command corresponds to the option values in
-@file{my-cosmology.conf} (previous command)?
+@file{my-cosmology.conf}, and is therefore identical to the previous
+command?
 
 @example
 $ astcosmiccal --config=my-cosmology.conf -z2
 @end example
 
-If you need this cosmology every time you are working in a specific
-directory, you can benefit from Gnuastro's default configuration files to
-avoid having to call the @option{--config} option. Let's assume that you
-want any CosmicCalculator call you make in the @file{my-cosmology}
-directory to use these parameters. You just have to copy the above
-configuration file into a special directory and file:
+But still, having to type @option{--config=my-cosmology.conf} every time is
+annoying, isn't it? If you need this cosmology every time you are working
+in a specific directory, you can use Gnuastro's default configuration file
+names and avoid having to type it manually.
+
+The default configuration files (that are checked if they exist) must be
+placed in the hidden @file{.gnuastro} sub-directory (in the same directory
+you are running the program). Their file name (within @file{.gnuastro})
+must also be the same as the program's executable name. So in the case of
+CosmicCalculator, the default configuration file in a given directory is
+@file{.gnuastro/astcosmiccal.conf}.
+
+Let's do this. We'll first make a directory for our custom cosmology, then
+build a @file{.gnuastro} within it. Finally, we'll copy the custom
+configuration file there:
 
 @example
 $ mkdir my-cosmology
@@ -2969,28 +3156,28 @@ $ mkdir my-cosmology/.gnuastro
 $ mv my-cosmology.conf my-cosmology/.gnuastro/astcosmiccal.conf
 @end example
 
-Once you run CosmicCalculator within @file{my-cosmology} as shown below,
-you will see how your cosmology has been implemented without having to type
-anything extra on the command-line.
+Once you run CosmicCalculator within @file{my-cosmology} (as shown below),
+you will see how your custom cosmology has been implemented without having
+to type anything extra on the command-line.
 
 @example
 $ cd my-cosmology
-$ astcosmiccal -z2
+$ astcosmiccal -P
 $ cd ..
 @end example
 
 To further simplify the process, you can use the @option{--setdirconf}
-option. If you are already in your desired directory, calling this option
-with the others will automatically write the final values (along with
-descriptions) in @file{.gnuastro/astcosmiccal.conf}. For example the
-commands below will make the same configuration file automatically (with
-one extra call to CosmicCalculator).
+option. If you are already in your desired working directory, calling this
+option with the others will automatically write the final values (along
+with descriptions) in @file{.gnuastro/astcosmiccal.conf}. For example try
+the commands below:
 
 @example
 $ mkdir my-cosmology2
 $ cd my-cosmology2
+$ astcosmiccal -P
 $ astcosmiccal --H0 70 --olambda=0.7 --omatter=0.3 --setdirconf
-$ astcosmiccal -z2
+$ astcosmiccal -P
 $ cd ..
 @end example
 
@@ -3002,34 +3189,55 @@ before. Finally, there are also system-wide 
configuration files that can be
 used to define the option values for all users on a system. See
 @ref{Configuration file precedence} for a more detailed discussion.
 
-We are now ready to start processing the downloaded images. Since these
-datasets are already aligned, you don't need to align them to make sure the
-pixel grid covers the same region in all inputs. Gnuastro's Warp program
-has features for such pixel-grid warping (see @ref{Warp}). Therefore, just
-for a demonstration, let's assume one image needs to be rotated by 20
-degrees to correspond to the other. To do that, you can run the following
-command:
+We'll stop the discussion on configuration files here, but you can always
+read about them in @ref{Configuration files}. Before continuing the
+tutorial, let's delete the two extra directories that we don't need any
+more:
+
+@example
+$ rm -rf my-cosmology*
+@end example
+
+
+@node Warping to a new pixel grid, Multiextension FITS files NoiseChisel's 
output, Option management and configuration files, General program usage 
tutorial
+@subsection Warping to a new pixel grid
+We are now ready to start processing the downloaded images. The XDF
+datasets we are using here are already aligned to the same pixel
+grid. However, warping to a different/matched pixel grid is commonly needed
+before higher-level analysis when you are using datasets from different
+instruments. So let's have a look at Gnuastro's warping features
+here.
+
+Gnuastro's Warp program should be used for warping the pixel-grid (see
+@ref{Warp}). For example, try rotating one of the images by 20 degrees:
 
 @example
 $ astwarp flat-ir/xdf-f160w.fits --rotate=20
 @end example
 
 @noindent
-Open the output and see it. If your final image is already aligned with RA
-and Dec, you can simply use the @option{--align} option and let Warp
-calculate the necessary rotation and apply it.
+Open the output (@file{xdf-f160w_rotated.fits}) and see how it is
+rotated. If your final image is already aligned with RA and Dec, you can
+simply use the @option{--align} option and let Warp calculate the necessary
+rotation and apply it. For example, try aligning the rotated image back to
+the standard orientation (just note that because of the two rotations, the
+NaN parts of the image are larger now):
 
-Warp can generally be used for any kind of pixel grid manipulation
-(warping). For example the outputs of the commands below will respectively
-have larger pixels (new resolution being one quarter the original
-resolution), get shifted by 2.8 (by sub-pixel), get a shear of 2, and be
-tilted (projected). After running each, please open the output file and see
-the effect.
+@example
+$ astwarp xdf-f160w_rotated.fits --align
+@end example
+
+Warp can generally be used for many kinds of pixel grid manipulation
+(warping), not just rotations. For example the outputs of the commands
+below will respectively have larger pixels (new resolution being one
+quarter the original resolution), get shifted by 2.8 (by sub-pixel), get a
+shear of 2, and be tilted (projected). Run each of them and open the output
+file to see the effect, they will become handy for you in the future.
 
 @example
 $ astwarp flat-ir/xdf-f160w.fits --scale=0.25
 $ astwarp flat-ir/xdf-f160w.fits --translate=2.8
-$ astwarp flat-ir/xdf-f160w.fits --shear=2
+$ astwarp flat-ir/xdf-f160w.fits --shear=0.2
 $ astwarp flat-ir/xdf-f160w.fits --project=0.001,0.0005
 @end example
 
@@ -3045,12 +3253,16 @@ $ astwarp flat-ir/xdf-f160w.fits --rotate=20 
--scale=0.25
 If you have multiple warps, do them all in one command. Don't warp them in
 separate commands because the correlated noise will become too strong. As
 you see in the matrix that is printed when you run Warp, it merges all the
-warps into a single warping matrix (see @ref{Warping basics} and
-@ref{Merging multiple warpings}) and simply applies that just once. Recall
-that since this is done through matrix multiplication, order matters in the
-separate operations. In fact through Warp's @option{--matrix} option, you
-can directly request your desired final warp and don't have to break it up
-into different warps like above (see @ref{Invoking astwarp}).
+warps into a single warping matrix (see @ref{Merging multiple warpings})
+and simply applies that (mixes the pixel values) just once. However, if you
+run Warp multiple times, the pixels will be mixed multiple times, creating
+a strong artificial blur/smoothing, or stronger correlated noise.
+
+Recall that the merging of multiple warps is done through matrix
+multiplication, therefore order matters in the separate operations. At a
+lower level, through Warp's @option{--matrix} option, you can directly
+request your desired final warp and don't have to break it up into
+different warps like above (see @ref{Invoking astwarp}).
 
 Fortunately these datasets are already aligned to the same pixel grid, so
 you don't actually need the files that were just generated. You can safely
@@ -3063,20 +3275,25 @@ can simply delete with a generic command like below.
 $ rm *.fits
 @end example
 
-@noindent
-To detect the signal in the image (separate interesting pixels from noise),
-we'll run NoiseChisel (@ref{NoiseChisel}):
 
-@example
-$ astnoisechisel flat-ir/xdf-f160w.fits
-@end example
+@node Multiextension FITS files NoiseChisel's output, NoiseChisel optimization 
for detection, Warping to a new pixel grid, General program usage tutorial
+@subsection Multiextension FITS files (NoiseChisel's output)
+Having completed a review of the basics in the previous sections, we are
+now ready to separate the signal (galaxies or stars) from the background
+noise in the image. We will be using the results of @ref{Dataset inspection
+and cropping}, so be sure you already have them. Gnuastro has NoiseChisel
+for this job. But NoiseChisel's output is a multi-extension FITS file,
+therefore to better understand how to use NoiseChisel, let's take a look at
+multi-extension FITS files and how you can interact with them.
 
-NoiseChisel's output is a single FITS file containing multiple
-extensions. In the FITS format, each extension contains a separate dataset
-(image in this case). You can get basic information about the extensions in
-a FITS file with Gnuastro's Fits program (see @ref{Fits}):
+In the FITS format, each extension contains a separate dataset (image in
+this case). You can get basic information about the extensions in a FITS
+file with Gnuastro's Fits program (see @ref{Fits}). To start with, let's
+run NoiseChisel without any options, then use Gnuastro's FITS program to
+inspect the number of extensions in this file.
 
 @example
+$ astnoisechisel flat-ir/xdf-f160w.fits
 $ astfits xdf-f160w_detected.fits
 @end example
 
@@ -3085,13 +3302,12 @@ extensions and the first (counting from zero, with name
 @code{NOISECHISEL-CONFIG}) is empty: it has value of @code{0} in the last
 column (which shows its size). The first extension in all the outputs of
 Gnuastro's programs only contains meta-data: data about/describing the
-datasets within (all) the output's extension(s). This allows the first
-extension to keep meta-data about all the extensions and is recommended by
-the FITS standard, see @ref{Fits} for more. This generic meta-data (for the
-whole file) is very important for being able to reproduce this same result
-later.
+datasets within (all) the output's extensions. This is recommended by the
+FITS standard, see @ref{Fits} for more. In the case of Gnuastro's programs,
+this generic zero-th/meta-data extension (for the whole file) contains all
+the configuration options of the program that created the file.
 
-The second extension of NoiseChisel's output (numbered 1 and named
+The second extension of NoiseChisel's output (numbered 1, named
 @code{INPUT-NO-SKY}) is the Sky-subtracted input that you provided. The
 third (@code{DETECTIONS}) is NoiseChisel's main output which is a binary
 image with only two possible values for all pixels: 0 for noise and 1 for
@@ -3103,26 +3319,28 @@ your computer, its numeric datatype an unsigned 8-bit 
integer (or
 for the input on a tile grid and were calculated over the undetected
 regions (for more on the importance of the Sky value, see @ref{Sky value}).
 
-Reproducing your results later (or checking the configuration of the
-program that produced the dataset at a later time during your higher-level
-analysis) is very important in any research. Therefore, Let's first take a
-closer look at the @code{NOISECHISEL-CONFIG} extension. But first, we'll
-run NoiseChisel with @option{-P} to see the option values in a format we
-are already familiar with (to help in the comparison).
+Metadata regarding how the analysis was done (or a dataset was created) is
+very important for higher-level analysis and reproducibility. Therefore,
+let's first take a closer look at the @code{NOISECHISEL-CONFIG}
+extension. If you specify a special header in the FITS file, Gnuastro's
+Fits program will print the header keywords (metadata) of that
+extension. You can either specify the HDU/extension counter (starting from
+0), or name. Therefore, the two commands below are identical for this file:
 
 @example
-$ astnoisechisel -P
 $ astfits xdf-f160w_detected.fits -h0
+$ astfits xdf-f160w_detected.fits -hNOISECHISEL-CONFIG
 @end example
 
 The first group of FITS header keywords are standard keywords (containing
 the @code{SIMPLE} and @code{BITPIX} keywords the first empty line). They
 are required by the FITS standard and must be present in any FITS
-extension. The second group contain the input file and all the options with
-their values in that run of NoiseChisel. Finally, the last group contain
-the date and version information of Gnuastro and its dependencies. The
-``versions and date'' group of keywords are present in all Gnuastro's FITS
-extension outputs, for more see @ref{Output FITS files}.
+extension. The second group contains the input file and all the options
+with their values in that run of NoiseChisel. Finally, the last group
+contains the date and version information of Gnuastro and its
+dependencies. The ``versions and date'' group of keywords are present in
+all Gnuastro's FITS extension outputs, for more see @ref{Output FITS
+files}.
 
 Note that if a keyword name is larger than 8 characters, it is preceded by
 a @code{HIERARCH} keyword and that all keyword names are in capital
@@ -3140,6 +3358,12 @@ $ astnoisechisel -P                   | grep    snminarea
 $ astfits xdf-f160w_detected.fits -h0 | grep -i snminarea
 @end example
 
+@noindent
+The metadata (that is stored in the output) can later be used to exactly
+reproduce/understand your result, even if you have lost/forgot the command
+you used to create the file. This feature is present in all of Gnuastro's
+programs, not just NoiseChisel.
+
 @cindex DS9
 @cindex GNOME
 @cindex SAO DS9
@@ -3168,30 +3392,59 @@ region. Just have in mind that NoiseChisel's job is 
@emph{only} detection
 (separating signal from noise), We'll do segmentation on this result later
 to find the individual galaxies/peaks over the detected pixels.
 
+Each HDU/extension in a FITS file is an independent dataset (image or
+table) which you can delete from the FITS file, or copy/cut to another
+file. For example, with the command below, you can copy NoiseChisel's
+@code{DETECTIONS} HDU/extension to another file:
+
+@example
+$ astfits xdf-f160w_detected.fits --copy=DETECTIONS -odetections.fits
+@end example
+
+There are similar options to conveniently cut (@option{--cut}, copy, then
+remove from the input) or delete (@option{--remove}) HDUs from a FITS file
+also. See @ref{HDU manipulation} for more.
+
+
+
+@node NoiseChisel optimization for detection, NoiseChisel optimization for 
storage, Multiextension FITS files NoiseChisel's output, General program usage 
tutorial
+@subsection NoiseChisel optimization for detection
+In @ref{Multiextension FITS files NoiseChisel's output}, we ran NoiseChisel
+and reviewed NoiseChisel's output format. Now that you have a better
+feeling for multi-extension FITS files, let's optimize NoiseChisel for this
+particular dataset.
+
 One good way to see if you have missed any signal (small galaxies, or the
 wings of brighter galaxies) is to mask all the detected pixels and inspect
 the noise pixels. For this, you can use Gnuastro's Arithmetic program (in
-particular its @code{where} operator, see @ref{Arithmetic operators}). With
-the command below, all detected pixels (in the @code{DETECTIONS} extension)
-will be set to NaN in the output (@file{nc-masked.fits}). To make the
-command easier to read/write, let's just put the file name in a shell
-variable (@code{img}) first. A shell variable's value can be retrieved by
-adding a @code{$} before its name.
+particular its @code{where} operator, see @ref{Arithmetic operators}). The
+command below will produce @file{mask-det.fits}. In it, all the pixels in
+the @code{INPUT-NO-SKY} extension that are flagged 1 in the
+@code{DETECTIONS} extension (dominated by signal, not noise) will be set to
+NaN.
+
+Since the various extensions are in the same file, for each dataset we need
+the file and extension name. To make the command easier to
+read/write/understand, let's use shell variables: `@code{in}' will be used
+for the Sky-subtracted input image and `@code{det}' will be used for the
+detection map. Recall that a shell variable's value can be retrieved by
+adding a @code{$} before its name. Also note that the double quotations are
+necessary when we have white-space characters in a variable's value (like
+this case).
 
 @example
-$ img=xdf-f160w_detected.fits
-$ astarithmetic $img $img nan where -hINPUT-NO-SKY -hDETECTIONS      \
-                --output=mask-det.fits
+$ in="xdf-f160w_detected.fits -hINPUT-NO-SKY"
+$ det="xdf-f160w_detected.fits -hDETECTIONS"
+$ astarithmetic $in $det nan where --output=mask-det.fits
 @end example
 
 @noindent
-To invert the result (only keep the values of detected pixels), you can
-flip the detected pixel values (from 0 to 1 and vice-versa) by adding a
-@code{not} after the second @code{$img}:
+To invert the result (only keep the detected pixels), you can flip the
+detection map (from 0 to 1 and vice-versa) by adding a `@code{not}' after
+the second @code{$det}:
 
 @example
-$ astarithmetic $img $img not nan where -hINPUT-NO-SKY -hDETECTIONS  \
-                --output=mask-sky.fits
+$ astarithmetic $in $det not nan where --output=mask-sky.fits
 @end example
 
 Looking again at the detected pixels, we see that there are thin
@@ -3230,7 +3483,8 @@ $ astnoisechisel --help | grep check
 Let's check the overall detection process to get a better feeling of what
 NoiseChisel is doing with the following command. To learn the details of
 NoiseChisel in more detail, please see
-@url{https://arxiv.org/abs/1505.01664, Akhlaghi and Ichikawa [2015]}.
+@url{https://arxiv.org/abs/1505.01664, Akhlaghi and Ichikawa [2015]}. Also
+see @ref{NoiseChisel changes after publication}.
 
 @example
 $ astnoisechisel flat-ir/xdf-f160w.fits --checkdetection
@@ -3239,10 +3493,18 @@ $ astnoisechisel flat-ir/xdf-f160w.fits --checkdetection
 The check images/tables are also multi-extension FITS files.  As you saw
 from the command above, when check datasets are requested, NoiseChisel
 won't go to the end. It will abort as soon as all the extensions of the
-check image are ready. Try listing the extensions of the output with
-@command{astfits} and then opening them with @command{ds9} as we done
-above. In order to understand the parameters and their biases (especially
-as you are starting to use Gnuastro, or running it a new dataset), it is
+check image are ready. Please list the extensions of the output with
+@command{astfits} and then open it with @command{ds9} as we did
+above. If you have read the paper, you will see why there are so many
+extensions in the check image.
+
+@example
+$ astfits xdf-f160w_detcheck.fits
+$ ds9 -mecube xdf-f160w_detcheck.fits -zscale -zoom to fit
+@end example
+
+In order to understand the parameters and their biases (especially as you
+are starting to use Gnuastro, or running it on a new dataset), it is
 @emph{strongly} encouraged to play with the different parameters and use
 the respective check images to see which step is affected by your changes
 and how, for example see @ref{Detecting large extended targets}.
@@ -3254,8 +3516,8 @@ already present here (a relatively early stage in the 
processing). Such
 connections at the lowest surface brightness limits usually occur when the
 dataset is too smoothed. Because of correlated noise, the dataset is
 already artificially smoothed, therefore further smoothing it with the
-default kernel may be the problem. Therefore, one solution is to use a
-sharper kernel (NoiseChisel's first step in its processing).
+default kernel may be the problem. One solution is thus to use a sharper
+kernel (NoiseChisel's first step in its processing).
 
 By default NoiseChisel uses a Gaussian with full-width-half-maximum (FWHM)
 of 2 pixels. We can use Gnuastro's MakeProfiles to build a kernel with FWHM
@@ -3282,36 +3544,47 @@ Looking at the @code{OPENED_AND_LABELED} extension, we 
see that the thin
 connections between smaller peaks has now significantly decreased. Going
 two extensions/steps ahead (in the first @code{HOLES-FILLED}), you can see
 that during the process of finding false pseudo-detections, too many holes
-have been filled: see how the many of the brighter galaxies are connected?
+have been filled: do you see how many of the brighter galaxies are
+connected? At this stage all holes are filled, irrespective of their size.
 
 Try looking two extensions ahead (in the first @code{PSEUDOS-FOR-SN}), you
 can see that there aren't too many pseudo-detections because of all those
 extended filled holes. If you look closely, you can see the number of
-pseudo-detections in the result NoiseChisel prints (around 4000). This is
+pseudo-detections in the result NoiseChisel prints (around 5000). This is
 another side-effect of correlated noise. To address it, we should slightly
-increase the pseudo-detection threshold (@option{--dthresh}, run with
-@option{-P} to see its default value):
+increase the pseudo-detection threshold (before changing
+@option{--dthresh}, run with @option{-P} to see the default value):
 
 @example
-$ astnoisechisel flat-ir/xdf-f160w.fits --kernel=kernel.fits  \
-                 --dthresh=0.2 --checkdetection
+$ astnoisechisel flat-ir/xdf-f160w.fits --kernel=kernel.fits \
+                 --dthresh=0.1 --checkdetection
 @end example
 
-Before visually inspecting the check image, you can see the effect of this
-change in NoiseChisel's command-line output: notice how the number of
-pseudos has increased to roughly 5500. Open the check image now and have a
-look, you can see how the pseudo-detections are distributed much more
-evenly in the image. The signal-to-noise ratio of pseudo-detections define
-NoiseChisel's reference for removing false detections, so they are very
-important to get right. Let's have a look at their signal-to-noise
-distribution with @option{--checksn}.
+Before visually inspecting the check image, you can already see the effect
+of this change in NoiseChisel's command-line output: notice how the number
+of pseudos has increased to more than 6000. Open the check image now and
+have a look, you can see how the pseudo-detections are distributed much
+more evenly in the image.
+
+@cartouche
+@noindent
+@strong{Maximize the number of pseudo-detections:} For a new noise-pattern
+(different instrument), play with @code{--dthresh} until you get a maximal
+number of pseudo-detections (the total number of pseudo-detections is
+printed on the command-line when you run NoiseChisel).
+@end cartouche
+
+The signal-to-noise ratio of pseudo-detections define NoiseChisel's
+reference for removing false detections, so they are very important to get
+right. Let's have a look at their signal-to-noise distribution with
+@option{--checksn}.
 
 @example
 $ astnoisechisel flat-ir/xdf-f160w.fits --kernel=kernel.fits  \
-                 --dthresh=0.2 --checkdetection --checksn
+                 --dthresh=0.1 --checkdetection --checksn
 @end example
 
-The output @file{xdf-f160w_detsn.fits} file contains two extensions for the
+The output (@file{xdf-f160w_detsn.fits}) contains two extensions for the
 pseudo-detections over the undetected (sky) regions and those over
 detections. The first column is the pseudo-detection label which you can
 see in the respective@footnote{The first @code{PSEUDOS-FOR-SN} in
@@ -3319,12 +3592,12 @@ see in the respective@footnote{The first 
@code{PSEUDOS-FOR-SN} in
 undetected regions and the second is for those over detected regions.}
 @code{PSEUDOS-FOR-SN} extension of @file{xdf-f160w_detcheck.fits}. You can
 see the table columns with the first command below and get a feeling for
-its distribution with the second command. We'll discuss the two Table and
-Statistics programs later.
+its distribution with the second command (the two Table and Statistics
+programs will be discussed later in the tutorial).
 
 @example
-$ asttable xdf-f160w_detsn.fits
-$ aststatistics xdf-f160w_detsn.fits -c2
+$ asttable xdf-f160w_detsn.fits -hSKY_PSEUDODET_SN
+$ aststatistics xdf-f160w_detsn.fits -hSKY_PSEUDODET_SN -c2
 @end example
 
 The correlated noise is again visible in this pseudo-detection
@@ -3334,7 +3607,7 @@ the difference between the three 0.99, 0.95 and 0.90 
quantiles with this
 command:
 
 @example
-$ aststatistics xdf-f160w_detsn.fits -c2                        \
+$ aststatistics xdf-f160w_detsn.fits -hSKY_PSEUDODET_SN -c2      \
                 --quantile=0.99 --quantile=0.95 --quantile=0.90
 @end example
 
@@ -3345,14 +3618,15 @@ detections). With the @command{aststatistics} command 
above, you see that a
 small number of extra false detections (impurity) in the final result
 causes a big change in completeness (you can detect more lower
 signal-to-noise true detections). So let's loosen-up our desired purity
-level and then mask the detected pixels like before to see if we have
-missed anything.
+level, remove the check-image options, and then mask the detected pixels
+like before to see if we have missed anything.
 
 @example
 $ astnoisechisel flat-ir/xdf-f160w.fits --kernel=kernel.fits  \
-                 --dthresh=0.2 --snquant=0.95
-$ img=xdf-f160w_detected.fits
-$ astarithmetic $img $img nan where -h1 -h2 --output=mask-det.fits
+                 --dthresh=0.1 --snquant=0.95
+$ in="xdf-f160w_detected.fits -hINPUT-NO-SKY"
+$ det="xdf-f160w_detected.fits -hDETECTIONS"
+$ astarithmetic $in $det nan where --output=mask-det.fits
 @end example
 
 Overall it seems good, but if you play a little with the color-bar and look
@@ -3371,34 +3645,14 @@ will see many of those sharp objects are now detected.
 
 @example
 $ astnoisechisel flat-ir/xdf-f160w.fits --kernel=kernel.fits     \
-                 --noerodequant=0.95 --dthresh=0.2 --snquant=0.95
+                 --noerodequant=0.95 --dthresh=0.1 --snquant=0.95
 @end example
 
-This seems to be fine and we can continue with our analysis. Before finally
-running NoiseChisel, let's just see how you can have all the raw outputs of
-NoiseChisel (Detection map and Sky and Sky Standard deviation) in a highly
-compressed format for archivability. For example the Sky-subtracted input
-is a redundant dataset: you can always generate it by subtracting the Sky
-from the input image. With the commands below you can turn the default
-NoiseChisel output that is larger than 100 megabytes in this case into
-about 200 kilobytes by removing all the redundant information in it, then
-compressing it:
-
-@example
-$ astnoisechisel flat-ir/xdf-f160w.fits --oneelempertile --rawoutput
-$ gzip --best xdf-f160w_detected.fits
-@end example
-
-You can open @file{xdf-f160w_detected.fits.gz} directly in SAO DS9 or feed
-it to any of Gnuastro's programs without having to uncompress
-it. Higher-level programs that take NoiseChisel's output as input can also
-deal with this compressed image where the Sky and its Standard deviation
-are one pixel-per-tile.
-
-To avoid having to write these options on every call to NoiseChisel, we'll
-just make a configuration file in a visible @file{config} directory. Then
-we'll define the hidden @file{.gnuastro} directory (that all Gnuastro's
-programs will look into for configuration files) as a symbolic link to the
+This seems to be fine and we can continue with our analysis. To avoid
+having to write these options on every call to NoiseChisel, we'll just make
+a configuration file in a visible @file{config} directory. Then we'll
+define the hidden @file{.gnuastro} directory (that all Gnuastro's programs
+will look into for configuration files) as a symbolic link to the
 @file{config} directory. Finally, we'll write the finalized values of the
 options into NoiseChisel's standard configuration file within that
 directory. We'll also put the kernel in a separate directory to keep the
@@ -3407,11 +3661,11 @@ top directory clean of any files we later need.
 @example
 $ mkdir kernel config
 $ ln -s config/ .gnuastro
-$ mv kernel.fits det-kernel.fits
-$ echo "kernel kernel/det-kernel.fits" > config/astnoisechisel.conf
-$ echo "noerodequant 0.95"            >> config/astnoisechisel.conf
-$ echo "dthresh      0.2"             >> config/astnoisechisel.conf
-$ echo "snquant      0.95"            >> config/astnoisechisel.conf
+$ mv kernel.fits kernel/noisechisel.fits
+$ echo "kernel kernel/noisechisel.fits" > config/astnoisechisel.conf
+$ echo "noerodequant 0.95"             >> config/astnoisechisel.conf
+$ echo "dthresh      0.1"              >> config/astnoisechisel.conf
+$ echo "snquant      0.95"             >> config/astnoisechisel.conf
 @end example
 
 @noindent
@@ -3424,80 +3678,90 @@ $ astnoisechisel flat-ir/xdf-f160w.fits 
--output=nc/xdf-f160w.fits
 $ astnoisechisel flat-ir/xdf-f105w.fits --output=nc/xdf-f105w.fits
 @end example
 
-Before continuing with the higher-level processing of this dataset, let's
-pause to use NoiseChisel's multi-extension output as a demonstration for
-working with FITS extensions using Gnuastro's Fits program (see @ref{Fits}.
 
-Let's say you need to copy a HDU/extension (image or table) from one FITS
-file to another. After the command below, @file{objects.fits} file will
-contain only one extension: a copy of NoiseChisel's binary detection
-map. There are similar options to conveniently cut (@option{--cut}, copy,
-then remove from the input) or delete (@option{--remove}) HDUs from a FITS
-file also.
+@node NoiseChisel optimization for storage, Segmentation and making a catalog, 
NoiseChisel optimization for detection, General program usage tutorial
+@subsection NoiseChisel optimization for storage
+
+As we showed before (in @ref{Multiextension FITS files NoiseChisel's
+output}), NoiseChisel's output is a multi-extension FITS file with several
+images the same size as the input. As the input datasets get larger this
+output can become hard to manage and waste a lot of storage
+space. Fortunately there is a solution to this problem (which is also
+useful for Segment's outputs). But first, let's have a look at the volume
+of NoiseChisel's output from @ref{NoiseChisel optimization for detection}
+(fast answer, it's larger than 100 mega-bytes):
 
 @example
-$ astfits nc/xdf-f160w.fits --copy=DETECTIONS -odetections.fits
+$ ls -lh nc/xdf-f160w.fits
 @end example
 
-NoiseChisel puts some general information on its outputs in the FITS header
-of the respective extension. To see the full list of keywords in an
-extension, you can again use the Fits program like above. But instead of
-HDU manipulation options, give it the HDU you are interested in with
-@option{-h}. You can also give the HDU number (as listed in the output
-above), for example @option{-h2} instead of @option{-hDETECTIONS}.
+Two options can drastically decrease NoiseChisel's output file size: 1)
+With the @option{--rawoutput} option, NoiseChisel won't create a
+Sky-subtracted input. After all, it is redundant: you can always generate
+it by subtracting the Sky from the input image (which you have in your
+database) using the Arithmetic program. 2) With the
+@option{--oneelempertile}, you can tell NoiseChisel to store its Sky and
+Sky standard deviation results with one pixel per tile (instead of many
+pixels per tile).
 
 @example
-$ astfits nc/xdf-f160w.fits -hDETECTIONS
+$ astnoisechisel flat-ir/xdf-f160w.fits --oneelempertile --rawoutput
 @end example
 
-@cindex GNU Grep
-The @code{DETSN} keyword in NoiseChisel's @code{DETECTIONS} extension
-contains the true pseudo-detection signal-to-noise ratio that was found by
-NoiseChisel on the dataset. It is not easy to find it in the middle of all
-the other keywords printed by the command above (especially in files that
-have many more keywords). To fix the problem, you can pipe the output of
-the command above into @code{grep} (a program for matching lines which is
-available on almost all Unix-like operating systems).
+@noindent
+The output is now just under 8 megabytes! But you can even be more
+efficient in space by compressing it. Try the command below to see how
+NoiseChisel's output has now shrunk to about 250 kilobytes while keeping
+all the necessary information as the original 100 mega-byte output.
 
 @example
-$ astfits nc/xdf-f160w.fits -hDETECTIONS | grep DETSN
+$ gzip --best xdf-f160w_detected.fits
+$ ls -lh xdf-f160w_detected.fits.gz
 @end example
 
-@cindex GNU Grep
-If you just want the value of the keyword and not the full FITS keyword
-line, you can use AWK. In the example below, AWK will print the third word
-(separated by white space characters) in any line that has a first column
-value of @code{DETSN}.
+We can get this wonderful level of compression because NoiseChisel's output
+is binary with only two values: 0 and 1. Compression algorithms are highly
+optimized in such scenarios.
+
+You can open @file{xdf-f160w_detected.fits.gz} directly in SAO DS9 or feed
+it to any of Gnuastro's programs without having to uncompress
+it. Higher-level programs that take NoiseChisel's output can also deal with
+this compressed image where the Sky and its Standard deviation are one
+pixel-per-tile.
 
-@example
-$ astfits nc/xdf-f160w.fits -h2 | awk '$1=="DETSN" @{print $3@}'
-@end example
 
+
+@node Segmentation and making a catalog, Working with catalogs estimating 
colors, NoiseChisel optimization for storage, General program usage tutorial
+@subsection Segmentation and making a catalog
 The main output of NoiseChisel is the binary detection map
-(@code{DETECTIONS} extension), which only has two values of 1 or 0. This is
-useful when studying the noise, but hardly of any use when you actually
-want to study the targets/galaxies in the image, especially in such a deep
-field where the detection map of almost everything is connected. To find
-the galaxies over the detections, we'll use Gnuastro's @ref{Segment}
-program:
+(@code{DETECTIONS} extension, see @ref{NoiseChisel optimization for
+detection}), which only has two values of 1 or 0. This is useful when
+studying the noise, but hardly of any use when you actually want to study
+the targets/galaxies in the image, especially in such a deep field where
+the detection map of almost everything is connected. To find the galaxies
+over the detections, we'll use Gnuastro's @ref{Segment} program:
 
 @example
-$ rm *.fits
 $ mkdir seg
 $ astsegment nc/xdf-f160w.fits -oseg/xdf-f160w.fits
 @end example
 
 Segment's operation is very much like NoiseChisel (in fact, prior to
-version 0.6, it was part of NoiseChisel), for example the output is a
+version 0.6, it was part of NoiseChisel). For example the output is a
 multi-extension FITS file, it has check images and uses the undetected
 regions as a reference. Please have a look at Segment's multi-extension
-output with @command{ds9} to get a good feeling of what it has done. Like
-NoiseChisel, the first extension is the input. The @code{CLUMPS} extension
-shows the true ``clumps'' with values that are @mymath{\ge1}, and the
-diffuse regions labeled as @mymath{-1}. In the @code{OBJECTS} extension, we
-see that the large detections of NoiseChisel (that may have contained many
-galaxies) are now broken up into separate labels. see @ref{Segment} for
-more.
+output with @command{ds9} to get a good feeling of what it has done.
+
+@example
+$ ds9 -mecube seg/xdf-f160w.fits -zscale -zoom to fit
+@end example
+
+Like NoiseChisel, the first extension is the input. The @code{CLUMPS}
+extension shows the true ``clumps'' with values that are @mymath{\ge1}, and
+the diffuse regions labeled as @mymath{-1}. In the @code{OBJECTS}
+extension, we see that the large detections of NoiseChisel (that may have
+contained many galaxies) are now broken up into separate labels. See
+@ref{Segment} for more.
 
 Having localized the regions of interest in the dataset, we are ready to do
 measurements on them with @ref{MakeCatalog}. Besides the IDs, we want to
@@ -3521,69 +3785,6 @@ From the printed statements on the command-line, you see 
that MakeCatalog
 read all the extensions in Segment's output for the various measurements it
 needed.
 
-The output of the MakeCatalog command above is a FITS table. The two clump
-and object catalogs are available in the two extensions of the single FITS
-file@footnote{MakeCatalog can also output plain text tables. However, in
-the plain text format you can only have one table per file. Therefore, if
-you also request measurements on clumps, two plain text tables will be
-created (suffixed with @file{_o.txt} and @file{_c.txt}).}. Let's inspect
-the separate extensions with the Fits program like before (as shown
-below). Later, we'll inspect the table in each extension with Gnuastro's
-Table program (see @ref{Table}). Note that we could have used
-@option{-hOBJECTS} and @option{-hCLUMPS} instead of @option{-h1} and
-@option{-h2} respectively.
-
-@example
-$ astfits  cat/xdf-f160w.fits              # Extension information
-$ asttable cat/xdf-f160w.fits -h1 --info   # Objects catalog info.
-$ asttable cat/xdf-f160w.fits -h1          # Objects catalog columns.
-$ asttable cat/xdf-f160w.fits -h2 -i       # Clumps catalog info.
-$ asttable cat/xdf-f160w.fits -h2          # Clumps catalog columns.
-@end example
-
-As you see above, when given a specific table (file name and extension),
-Table will print the full contents of all the columns. To see basic
-information about each column (for example name, units and comments),
-simply append a @option{--info} (or @option{-i}).
-
-To print the contents of special column(s), just specify the column
-number(s) (counting from @code{1}) or the column name(s) (if they have
-one). For example, if you just want the magnitude and signal-to-noise ratio
-of the clumps (in @option{-h2}), you can get it with any of the following
-commands
-
-@example
-$ asttable cat/xdf-f160w.fits -h2 -c5,6
-$ asttable cat/xdf-f160w.fits -h2 -c5,SN
-$ asttable cat/xdf-f160w.fits -h2 -c5         -c6
-$ asttable cat/xdf-f160w.fits -h2 -cMAGNITUDE -cSN
-@end example
-
-In the example above, the clumps catalog has two ID columns (one for the
-over-all clump ID and one for the ID of the clump in its host object),
-while the objects catalog only has one ID column. Therefore, the location
-of the magnitude column differs between the object and clumps catalog. So
-if you want to specify the columns by number, you will need to change the
-numbers when viewing the clump and objects catalogs. This is a useful
-advantage of having/using column names@footnote{Column meta-data (including
-a name) can also be specified in plain text tables, see @ref{Gnuastro text
-table format}.}.
-
-@example
-$ asttable catalog/xdf-f160w.fits -h1 -c4 -c5
-$ asttable catalog/xdf-f160w.fits -h2 -c5 -c6
-@end example
-
-Finally, the comments in MakeCatalog's output (@code{COMMENT} keywords in
-the FITS headers, or lines starting with @code{#} in plain text) contain
-some important information about the input dataset that can be useful (for
-example pixel area or per-pixel surface brightness limit). For example have
-a look at the output of this command:
-
-@example
-$ astfits cat/xdf-f160w.fits -h1 | grep COMMENT
-@end example
-
 To calculate colors, we also need magnitude measurements on the F105W
 filter. However, the galaxy properties might differ between the filters
 (which is the whole purpose behind measuring colors). Also, the noise
@@ -3598,12 +3799,12 @@ same pixels on both images.
 
 The F160W image is deeper, thus providing better detection/segmentation,
 and redder, thus observing smaller/older stars and representing more of the
-mass in the galaxies. We will thus use the pixel labels generated on the
-F160W filter, but do the measurements on the F105W filter (using the
-@option{--valuesfile} option) in the command below. Notice how the only
-difference between this call to MakeCatalog and the previous one is
-@option{--valuesfile}, the value given to @code{--zeropoint} and the output
-name.
+mass in the galaxies. To generate the F105W catalog, we will thus use the
+pixel labels generated on the F160W filter, but do the measurements on the
+F105W filter (using MakeCatalog's @option{--valuesfile} option). Notice how
+the only difference between this call to MakeCatalog and the previous one
+is @option{--valuesfile}, the value given to @code{--zeropoint} and the
+output name.
 
 @example
 $ astmkcatalog seg/xdf-f160w.fits --ids --ra --dec --magnitude --sn \
@@ -3622,6 +3823,68 @@ hard-to-deblend and low signal-to-noise diffuse regions, 
they are more
 robust for calculating the colors (compared to objects). Therefore from
 this step onward, we'll continue with clumps.
 
+Finally, the comments in MakeCatalog's output (@code{COMMENT} keywords in
+the FITS headers, or lines starting with @code{#} in plain text) contain
+some important information about the input datasets and other useful info
+(for example pixel area or per-pixel surface brightness limit). You can see
+them with this command:
+
+@example
+$ astfits cat/xdf-f160w.fits -h1 | grep COMMENT
+@end example
+
+
+@node Working with catalogs estimating colors, Aperture photomery, 
Segmentation and making a catalog, General program usage tutorial
+@subsection Working with catalogs (estimating colors)
+The output of the MakeCatalog command above is a FITS table (see
+@ref{Segmentation and making a catalog}). The two clump and object catalogs
+are available in the two extensions of the single FITS
+file@footnote{MakeCatalog can also output plain text tables. However, in
+the plain text format you can only have one table per file. Therefore, if
+you also request measurements on clumps, two plain text tables will be
+created (suffixed with @file{_o.txt} and @file{_c.txt}).}. Let's see the
+extensions and their basic properties with the Fits program:
+
+@example
+$ astfits  cat/xdf-f160w.fits              # Extension information
+@end example
+
+Now, let's inspect the table in each extension with Gnuastro's Table
+program (see @ref{Table}). Note that we could have used @option{-hOBJECTS}
+and @option{-hCLUMPS} instead of @option{-h1} and @option{-h2}
+respectively.
+
+@example
+$ asttable cat/xdf-f160w.fits -h1 --info   # Objects catalog info.
+$ asttable cat/xdf-f160w.fits -h1          # Objects catalog columns.
+$ asttable cat/xdf-f160w.fits -h2 -i       # Clumps catalog info.
+$ asttable cat/xdf-f160w.fits -h2          # Clumps catalog columns.
+@end example
+
+As you see above, when given a specific table (file name and extension),
+Table will print the full contents of all the columns. To see the basic
+metadata about each column (for example name, units and comments), simply
+append a @option{--info} (or @option{-i}) to the command.
+
+To print the contents of special column(s), just specify the column
+number(s) (counting from @code{1}) or the column name(s) (if they have
+one). For example, if you just want the magnitude and signal-to-noise ratio
+of the clumps (in @option{-h2}), you can get it with any of the following
+commands:
+
+@example
+$ asttable cat/xdf-f160w.fits -h2 -c5,6
+$ asttable cat/xdf-f160w.fits -h2 -c5,SN
+$ asttable cat/xdf-f160w.fits -h2 -c5         -c6
+$ asttable cat/xdf-f160w.fits -h2 -cMAGNITUDE -cSN
+@end example
+
+Using column names instead of numbers has many advantages: 1) you don't
+have to worry about the order of columns in the table. 2) It acts as
+documentation in the script. Column meta-data (including a name) aren't
+just limited to FITS tables and can also be used in plain text tables, see
+@ref{Gnuastro text table format}.
+
 We can finally calculate the colors of the objects from these two
 datasets. If you inspect the contents of the two catalogs, you'll notice
 that because they were both derived from the same segmentation maps, the
@@ -3636,15 +3899,15 @@ the options relating to each catalog are placed under 
it for easy
 understanding. You give Match two catalogs (from the two different filters
 we derived above) as argument, and the HDUs containing them (if they are
 FITS files) with the @option{--hdu} and @option{--hdu2} options. The
-@option{--ccol1} and @option{--ccol2} options specify which columns should
-be matched with which in the two catalogs. With @option{--aperture} you
-specify the acceptable error (radius in 2D), in the same units as the
-columns (see below for why we have requested an aperture of 0.35
-arcseconds, or less than 6 HST pixels).
-
-The @option{--outcols} is a very convenient feature in Match: you can use
-it to specify which columns from the two catalogs you want in the output
-(merge two input catalogs into one). If the first character is an
+@option{--ccol1} and @option{--ccol2} options specify the
+coordinate-columns which should be matched with which in the two
+catalogs. With @option{--aperture} you specify the acceptable error (radius
+in 2D), in the same units as the columns (see below for why we have
+requested an aperture of 0.35 arcseconds, or less than 6 HST pixels).
+
+The @option{--outcols} option of Match is a very convenient feature: you
+can use it to specify which columns from the two catalogs you want in the
+output (merge two input catalogs into one). If the first character is an
 `@key{a}', the respective matched column (number or name, similar to Table
 above) in the first catalog will be written in the output table. When the
 first character is a `@key{b}', the respective column from the second
@@ -3661,24 +3924,21 @@ $ astmatch cat/xdf-f160w.fits           
cat/xdf-f105w.fits         \
            --output=cat/xdf-f160w-f105w.fits
 @end example
 
-By default (when @option{--quiet} isn't called), the Match program will
-just print the number of matched rows in the standard output. If you have a
-look at your input catalogs, this should be the same as the number of rows
-in them. Let's have a look at the columns in the matched catalog:
+Let's have a look at the columns in the matched catalog:
 
 @example
 $ asttable cat/xdf-f160w-f105w.fits -i
 @end example
 
-Indeed, its exactly the columns we wanted. There is just one confusion
-however: there are two @code{MAGNITUDE} and @code{SN} columns. Right now,
-you know that the first one was from the F160W filter, and the second was
-for F105W. But in one hour, you'll start doubting your self: going through
-your command history, trying to answer this question: ``which magnitude
-corresponds to which filter?''. You should never torture your future-self
-(or colleagues) like this! So, let's rename these confusing columns in the
-matched catalog. The FITS standard for tables stores the column names in
-the @code{TTYPE} header keywords, so let's have a look:
+Indeed, it's exactly the columns we wanted: there are two @code{MAGNITUDE}
+and @code{SN} columns. The first is from the F160W filter, the second is
+from the F105W. Right now, you know this. But in one hour, you'll start
+doubting yourself: going through your command history, trying to answer
+this question: ``which magnitude corresponds to which filter?''. You should
+never torture your future-self (or colleagues) like this! So, let's rename
+these confusing columns in the matched catalog. The FITS standard for
+tables stores the column names in the @code{TTYPE} header keywords, so
+let's have a look:
 
 @example
 $ astfits cat/xdf-f160w-f105w.fits -h1 | grep TTYPE
@@ -3695,14 +3955,14 @@ $ astfits cat/xdf-f160w-f105w.fits -h1                  
        \
 $ asttable cat/xdf-f160w-f105w.fits -i
 @end example
 
-
-If you noticed, when running Match, the previous command, we also asked for
-@option{--log}. Many Gnuastro programs have this option to provide some
-detailed information on their operation in case you are curious. Here, we
-are using it to justify the value we gave to @option{--aperture}. Even
-though you asked for the output to be written in the @file{cat} directory,
-a listing of the contents of your current directory will show you an extra
-@file{astmatch.fits} file. Let's have a look at what columns it contains.
+If you noticed, when running Match, we also asked for a log file
+(@option{--log}). Many Gnuastro programs have this option to provide some
+detailed information on their operation in case you are curious or want to
+debug something. Here, we are using it to justify the value we gave to
+@option{--aperture}. Even though you asked for the output to be written in
+the @file{cat} directory, a listing of the contents of your current
+directory will show you an extra @file{astmatch.fits} file. Let's have a
+look at what columns it contains.
 
 @example
 $ ls
@@ -3746,14 +4006,14 @@ Gnuastro has a simple program for basic statistical 
analysis. The command
 below will print some basic information about the distribution (minimum,
 maximum, median and etc), along with a cute little ASCII histogram to
 visually help you understand the distribution on the command-line without
-the need for a graphic user interface (see @ref{Invoking
-aststatistics}). This ASCII histogram can be useful when you just want some
-coarse and general information on the input dataset. It is also useful when
-working on a server (where you may not have graphic user interface), and
-finally, its fast.
+the need for a graphic user interface. This ASCII histogram can be useful
+when you just want some coarse and general information on the input
+dataset. It is also useful when working on a server (where you may not have
+graphic user interface), and finally, it's fast.
 
 @example
 $ aststatistics astmatch.fits -cMATCH_DIST
+$ rm astmatch.fits
 @end example
 
 The units of this column are the same as the columns you gave to Match: in
@@ -3761,28 +4021,34 @@ degrees. You see that while almost all the objects 
matched very nicely, the
 maximum distance is roughly 0.31 arcseconds. This is why we asked for an
 aperture of 0.35 arcseconds when doing the match.
 
-We can now use AWK to find the colors. We'll ask AWK to only use rows that
-don't have a NaN magnitude in either filter@footnote{This can happen even
-on the reference image. It is because of the current way clumps are defined
-in Segment when they are placed on strong gradients. It is because of high
-``river'' values on such gradients. See @ref{Segment changes after
-publication}. To avoid this problem, you can currently ask for the
-@option{--brighntessnoriver} output column.}. We will also ignore columns
-which don't have reliable F105W measurement (with a S/N less than
+Gnuastro's Table program can also be used to measure the colors using the
+command below. As before, the @option{-c1,2} option will tell Table to
+print the first two columns. With the @option{--range=SN_F160W,7,inf} we
+only keep the rows that have a F160W signal-to-noise ratio larger than
 7@footnote{The value of 7 is taken from the clump S/N threshold in F160W
-(where the clumps were defined).}).
+(where the clumps were defined).}.
+
+Finally, for estimating the colors, we use Table's column arithmetic
+feature. It uses the same notation as the Arithmetic program (see
+@ref{Reverse polish notation}), with almost all the same operators (see
+@ref{Arithmetic operators}). You can use column arithmetic in any output
+column, just put the value in double quotations and start the value with
+@code{arith} (followed by a space) like below. In column-arithmetic, you
+can identify columns by number or name, see @ref{Column arithmetic}.
 
 @example
-$ asttable cat/xdf-f160w-f105w.fits -cMAG_F160W,MAG_F105W,SN_F105W  \
-           | awk '$1!="nan" && $2!="nan" && $3>7 @{print $2-$1@}'     \
-           > f105w-f160w.txt
+$ asttable cat/xdf-f160w-f105w.fits -ocat/f105w-f160w.fits \
+           -c1,2,RA,DEC,"arith MAG_F105W MAG_F160W -"      \
+           --range=SN_F160W,7,inf
 @end example
 
-You can inspect the distribution of colors with the Statistics program
-again:
+@noindent
+You can inspect the distribution of colors with the Statistics program. But
+first, let's give the color column a proper name.
 
 @example
-$ aststatistics f105w-f160w.txt -c1
+$ astfits cat/f105w-f160w.fits --update=TTYPE5,COLOR_F105W_F160W
+$ aststatistics cat/f105w-f160w.fits -cCOLOR_F105W_F160W
 @end example
 
 You can later use Gnuastro's Statistics program with the
@@ -3793,22 +4059,27 @@ just want a specific measure, for example the mean, 
median and standard
 deviation, you can ask for them specifically with this command:
 
 @example
-$ aststatistics f105w-f160w.txt -c1 --mean --median --std
+$ aststatistics cat/f105w-f160w.fits -cCOLOR_F105W_F160W \
+                --mean --median --std
 @end example
 
+
+@node Aperture photomery, Finding reddest clumps and visual inspection, 
Working with catalogs estimating colors, General program usage tutorial
+@subsection Aperture photomery
 Some researchers prefer to have colors in a fixed aperture for all the
-objects. The colors we calculated above used a different segmentation map
-for each object. This might not satisfy some science cases. So, let's make
-a fixed aperture catalog. To make an catalog from fixed apertures, we
+objects. The colors we calculated in @ref{Working with catalogs estimating
+colors} used a different segmentation map for each object. This might not
+satisfy some science cases. To make a catalog from fixed apertures, we
 should make a labeled image which has a fixed label for each aperture. That
 labeled image can be given to MakeCatalog instead of Segment's labeled
 detection image.
 
 @cindex GNU AWK
-To generate the apertures catalog, we'll first read the positions from
-F160W catalog and set the other parameters of each profile to be a fixed
-circle of radius 5 pixels (we want all apertures to be identical in this
-scenario).
+To generate the apertures catalog we'll use Gnuastro's MakeProfiles (see
+@ref{MakeProfiles}). We'll first read the clump positions from the F160W
+catalog, then use AWK to set the other parameters of each profile to be a
+fixed circle of radius 5 pixels (recall that we want all apertures to be
+identical in this scenario).
 
 @example
 $ rm *.fits *.txt
@@ -3817,14 +4088,15 @@ $ asttable cat/xdf-f160w.fits -hCLUMPS -cRA,DEC         
           \
            > apertures.txt
 @end example
 
-We can now feed this catalog into MakeProfiles to build the apertures for
-us. See @ref{Invoking astmkprof} for a description of the options. The most
-important for this particular job is @option{--mforflatpix}, it tells
-MakeProfiles that the values in the magnitude column should be used for
-each pixel of a flat profile. Without it, MakeProfiles would build the
-profiles such that the @emph{sum} of the pixels of each profile would have
-a @emph{magnitude} (in log-scale) of the value given in that column (what
-you would expect when simulating a galaxy for example).
+We can now feed this catalog into MakeProfiles using the command below to
+build the apertures over the image. The most important option for this
+particular job is @option{--mforflatpix}, it tells MakeProfiles that the
+values in the magnitude column should be used for each pixel of a flat
+profile. Without it, MakeProfiles would build the profiles such that the
+@emph{sum} of the pixels of each profile would have a @emph{magnitude} (in
+log-scale) of the value given in that column (what you would expect when
+simulating a galaxy for example). See @ref{Invoking astmkprof} for details
+on the options.
 
 @example
 $ astmkprof apertures.txt --background=flat-ir/xdf-f160w.fits     \
@@ -3860,48 +4132,63 @@ $ astmkcatalog apertures.fits -h1 --zeropoint=26.27     
   \
 @end example
 
 This catalog has the same number of rows as the catalog produced from
-clumps, therefore similar to how we found colors, you can compare the
-aperture and clump magnitudes for example. You can also change the filter
-name and zeropoint magnitudes and run this command again to have the fixed
-aperture magnitude in the F160W filter and measure colors on apertures.
+clumps in @ref{Working with catalogs estimating colors}. Therefore similar
+to how we found colors, you can compare the aperture and clump magnitudes
+for example.
+
+You can also change the filter name and zeropoint magnitudes and run this
+command again to have the fixed aperture magnitude in the F160W filter and
+measure colors on apertures.
+
 
+@node Finding reddest clumps and visual inspection, Citing and acknowledging 
Gnuastro, Aperture photomery, General program usage tutorial
+@subsection Finding reddest clumps and visual inspection
 @cindex GNU AWK
-As a final step, let's go back to the original clumps-based catalogs we
-generated before. We'll find the objects with the strongest color and make
-a cutout to inspect them visually and finally, we'll see how they are
-located on the image.
+As a final step, let's go back to the original clumps-based color
+measurement we generated in @ref{Working with catalogs estimating
+colors}. We'll find the objects with the strongest color and make a cutout
+to inspect them visually and finally, we'll see how they are located on the
+image. With the command below, we'll select the reddest objects (those with
+a color larger than 1.5):
 
-First, let's see what the objects with a color more than two magnitudes
-look like. As you see, this is very much like the command above for
-selecting the colors, only instead of printing the color, we'll print the
-RA and Dec. With the command below, the positions of all lines with a color
-more than 1.5 will be put in @file{reddest.txt}
+@example
+$ asttable cat/f105w-f160w.fits --range=COLOR_F105W_F160W,1.5,inf
+@end example
+
+We want to crop the F160W image around each of these objects, but we need a
+unique identifier for them first. We'll define this identifier using the
+object and clump labels (with an underscore between them) and feed the
+output of the command above to AWK to generate a catalog. Note that since
+we are making a plain text table, we'll define the column metadata manually
+(see @ref{Gnuastro text table format}).
 
 @example
-$ asttable cat/xdf-f160w-f105w.fits                                \
-           -cMAG_F160W,MAG_F105W,SN_F105W,RA,DEC                   \
-           | awk '$1!="nan" && $2!="nan" && $2-$1>1.5 && $3>7      \
-                  @{print $4,$5@}' > reddest.txt
+$ echo "# Column 1: ID [name, str10] Object ID" > reddest.txt
+$ asttable cat/f105w-f160w.fits --range=COLOR_F105W_F160W,1.5,inf \
+           | awk '@{printf("%d_%-10d %f %f\n", $1, $2, $3, $4)@}' \
+           >> reddest.txt
 @end example
 
-We can now feed @file{reddest.txt} into Gnuastro's crop to see what these
-objects look like. To keep things clean, we'll make a directory called
-@file{crop-red} and ask Crop to save the crops in this directory. We'll
-also add a @file{-f160w.fits} suffix to the crops (to remind us which image
-they came from). The width of the crops will be 15 arcseconds.
+We can now feed @file{reddest.txt} into Gnuastro's Crop program to see what
+these objects look like. To keep things clean, we'll make a directory
+called @file{crop-red} and ask Crop to save the crops in this
+directory. We'll also add a @file{-f160w.fits} suffix to the crops (to
+remind us which image they came from). The width of the crops will be 15
+arcseconds.
 
 @example
 $ mkdir crop-red
-$ astcrop --mode=wcs --coordcol=3 --coordcol=4 flat-ir/xdf-f160w.fits \
-          --catalog=reddest.txt --width=15/3600,15/3600               \
+$ astcrop flat-ir/xdf-f160w.fits --mode=wcs --namecol=ID \
+          --catalog=reddest.txt --width=15/3600,15/3600  \
           --suffix=-f160w.fits --output=crop-red
 @end example
 
-Like the MakeProfiles command above, you might notice that the crops aren't
-made in order. This is because each crop is independent of the rest,
-therefore crops are done in parallel, and parallel operations are
-asynchronous. In the command above, you can change @file{f160w} to
-@file{f105w} to make the crops in both filters.
+You can see all the cropped FITS files in the @file{crop-red}
+directory. Like the MakeProfiles command in @ref{Aperture photomery}, you
+might notice that the crops aren't made in order. This is because each crop
+is independent of the rest, therefore crops are done in parallel, and
+parallel operations are asynchronous. In the command above, you can change
+@file{f160w} to @file{f105w} to make the crops in both filters.
 
 To view the crops more easily (not having to open ds9 for each image), you
 can convert the FITS crops into the JPEG format with a shell loop like
@@ -3916,19 +4203,7 @@ $ cd ..
 @end example
 
 You can now use your general graphic user interface image viewer to flip
-through the images more easily. On GNOME, you can use the ``Eye of GNOME''
-image viewer (with executable name of @file{eog}). Run the command below to
-open the first one (if you aren't using GNOME, use the command of your
-image viewer instead of @code{eog}):
-
-@example
-$ eog 1-f160w.jpg
-@end example
-
-In Eye of GNOME, you can flip through the images and compare them visually
-more easily by pressing the @key{<SPACE>} key. Of course, the flux ranges
-have been chosen generically here for seeing the fainter parts. Therefore,
-brighter objects will be fully black.
+through the images more easily, or import them into your papers/reports.
 
 @cindex GNU Parallel
 The @code{for} loop above to convert the images will do the job in series:
@@ -3955,11 +4230,11 @@ convert your catalog into a ``region file'' to feed 
into DS9. To do that,
 you can use AWK again as shown below.
 
 @example
-$ awk 'BEGIN@{print "# Region file format: DS9 version 4.1";     \
-             print "global color=green width=2";                \
-             print "fk5";@}                                      \
-       @{printf "circle(%s,%s,1\")\n", $1, $2;@}' reddest.txt     \
-       > reddest.reg
+$ awk 'BEGIN@{print "# Region file format: DS9 version 4.1";      \
+             print "global color=green width=2";                 \
+             print "fk5";@}                                       \
+       !/^#/@{printf "circle(%s,%s,1\") # text=@{%s@}\n",$2,$3,$1;@}'\
+      reddest.txt > reddest.reg
 @end example
 
 This region file can be loaded into DS9 with its @option{-regions} option
@@ -3972,6 +4247,9 @@ $ ds9 -mecube seg/xdf-f160w.fits -zscale -zoom to fit    \
       -regions load all reddest.reg
 @end example
 
+
+@node Citing and acknowledging Gnuastro,  , Finding reddest clumps and visual 
inspection, General program usage tutorial
+@subsection Citing and acknowledging Gnuastro
 In conclusion, we hope this extended tutorial has been a good starting
 point to help in your exciting research. If this book or any of the
 programs in Gnuastro have been useful for your research, please cite the
@@ -3993,7 +4271,7 @@ $ astnoisechisel --cite
 
 
 
-@node Detecting large extended targets, Hubble visually checks and classifies 
his catalog, General program usage tutorial, Tutorials
+@node Detecting large extended targets,  , General program usage tutorial, 
Tutorials
 @section Detecting large extended targets
 
 The outer wings of large and extended objects can sink into the noise very
@@ -4080,41 +4358,53 @@ directory clean.
 $ bunzip2 r.fits.bz2
 @end example
 
-Let's see how NoiseChisel operates on it with its default parameters:
+
+@menu
+* NoiseChisel optimization::    Optimize NoiseChisel to dig very deep.
+* Achieved surface brightness level::  Measure how much you detected.
+@end menu
+
+@node NoiseChisel optimization, Achieved surface brightness level, Detecting 
large extended targets, Detecting large extended targets
+@subsection NoiseChisel optimization
+In @ref{Detecting large extended targets} we downloaded the single exposure
+SDSS image. Let's see how NoiseChisel operates on it with its default
+parameters:
 
 @example
 $ astnoisechisel r.fits -h0
 @end example
 
-As described in @ref{NoiseChisel output}, NoiseChisel's default output is a
-multi-extension FITS file. A method to view them effectively and easily is
-discussed in @ref{Viewing multiextension FITS images}. For more on tweaking
-NoiseChisel and optimizing its output for archiving or sending to
-colleagues, see the NoiseChisel part of the previous tutorial in
-@ref{General program usage tutorial}.
+As described in @ref{Multiextension FITS files NoiseChisel's output},
+NoiseChisel's default output is a multi-extension FITS file. Open the
+output @file{r_detected.fits} file and have a look at the extensions, the
+first extension is only meta-data and contains NoiseChisel's configuration
+parameters. The rest are the Sky-subtracted input, the detection map, Sky
+values and Sky standard deviation.
 
-Open the output @file{r_detected.fits} file and have a look at the
-extensions, the first extension is only meta-data and contains
-NoiseChisel's configuration parameters. The rest are the Sky-subtracted
-input, the detection map, Sky values and Sky standard deviation.
+@example
+$ ds9 -mecube r_detected.fits -zscale -zoom to fit
+@end example
 
-Flipping through the extensions in a FITS viewer (for example SAO DS9), you
-will see that the Sky-subtracted image looks reasonable (there are no major
-artifacts due to bad Sky subtraction compared to the input). The second
+Flipping through the extensions in a FITS viewer, you will see that the
+first image (Sky-subtracted image) looks reasonable: there are no major
+artifacts due to bad Sky subtraction compared to the input. The second
 extension also seems reasonable with a large detection map that covers the
 whole of NGC5195, but also extends beyond towards the bottom of the
-image. Try going back and forth between the @code{DETECTIONS} and
-@code{SKY} extensions, you will notice that there is still significant
+image.
+
+Now try flipping between the @code{DETECTIONS} and @code{SKY} extensions.
+In the @code{SKY} extension, you'll notice that there is still significant
 signal beyond the detected pixels. You can tell that this signal belongs to
 the galaxy because the far-right side of the image is dark and the brighter
-tiles (that weren't interpolated) are surrounding the detected pixels.
+tiles are surrounding the detected pixels.
 
 The fact that signal from the galaxy remains in the Sky dataset shows that
-you haven't done a good detection. Generally, any time your target is much
-larger than the tile size and the signal is almost flat (like this case),
-this @emph{will} happen. Therefore, when there are large objects in the
-dataset, @strong{the best place} to check the accuracy of your detection is
-the estimated Sky image.
+you haven't done a good detection. The @code{SKY} extension must not
+contain any light around the galaxy. Generally, any time your target is
+much larger than the tile size and the signal is almost flat (like this
+case), this @emph{will} happen. Therefore, when there are large objects in
+the dataset, @strong{the best place} to check the accuracy of your
+detection is the estimated Sky image.
 
 When dominated by the background, noise has a symmetric
 distribution. However, signal is not symmetric (we don't have negative
@@ -4129,9 +4419,9 @@ However, skewness is only a proxy for signal when the 
signal has structure
 (varies per pixel). Therefore, when it is approximately constant over a
 whole tile, or sub-set of the image, the signal's effect is just to shift
 the symmetric center of the noise distribution to the positive and there
-won't be any skewness (major difference between the mean and median): this
+won't be any skewness (major difference between the mean and median). This
 positive@footnote{In processed images, where the Sky value can be
-over-estimated, this constant shift can be negative.}  shift that preserves
+over-estimated, this constant shift can be negative.} shift that preserves
 the symmetric distribution is the Sky value. When there is a gradient over
 the dataset, different tiles will have different constant
 shifts/Sky-values, for example see Figure 11 of
@@ -4157,9 +4447,9 @@ $ astnoisechisel r.fits -h0 --checkqthresh
 @end example
 
 Notice how this option doesn't allow NoiseChisel to finish. NoiseChisel
-aborted after finding the quantile thresholds. When you call any of
-NoiseChisel's @option{--check*} options, by default, it will abort as soon
-as all the check steps have been written in the check file (a
+aborted after finding and applying the quantile thresholds. When you call
+any of NoiseChisel's @option{--check*} options, by default, it will abort
+as soon as all the check steps have been written in the check file (a
 multi-extension FITS file). This allows you to focus on the problem you
 wanted to check as soon as possible (you can disable this feature with the
 @option{--continueaftercheck} option).
@@ -4175,12 +4465,12 @@ have a new dataset in front of you. Robust data 
analysis is an art,
 therefore a good scientist must first be a good artist.
 
 The first extension of @file{r_qthresh.fits} (@code{CONVOLVED}) is the
-convolved input image where the threshold(s) is defined and applied. For
-more on the effect of convolution and thresholding, see Sections 3.1.1 and
-3.1.2 of @url{https://arxiv.org/abs/1505.01664, Akhlaghi and Ichikawa
-[2015]}. The second extension (@code{QTHRESH_ERODE}) has a blank value for
-all the pixels of any tile that was identified as having significant
-signal. The next two extensions (@code{QTHRESH_NOERODE} and
+convolved input image where the threshold(s) is(are) defined and
+applied. For more on the effect of convolution and thresholding, see
+Sections 3.1.1 and 3.1.2 of @url{https://arxiv.org/abs/1505.01664, Akhlaghi
+and Ichikawa [2015]}. The second extension (@code{QTHRESH_ERODE}) has a
+blank value for all the pixels of any tile that was identified as having
+significant signal. The next two extensions (@code{QTHRESH_NOERODE} and
 @code{QTHRESH_EXPAND}) are the other two quantile thresholds that are
 necessary in NoiseChisel's later steps. Every step in this file is repeated
 on the three thresholds.
@@ -4194,8 +4484,8 @@ galaxy have been removed in this step. For more on the 
outlier rejection
 algorithm, see the latter half of @ref{Quantifying signal in a tile}.
 
 However, the default outlier rejection parameters weren't enough, and when
-you play with the color-bar, you see that the faintest parts of the galaxy
-outskirts still remain. Therefore have two strategies for approaching this
+you play with the color-bar, you still see a strong gradient around the
+outer tidal feature of the galaxy. You have two strategies for fixing this
 problem: 1) Increase the tile size to get more accurate measurements of
 skewness. 2) Strengthen the outlier rejection parameters to discard more of
 the tiles with signal. Fortunately in this image we have a sufficiently
@@ -4219,114 +4509,220 @@ directly feed the convolved image and avoid 
convolution. For more on
 
 To identify the skewness caused by the flat NGC 5195 and M51 tidal features
 on the tiles under it, we thus have to choose a tile size that is larger
-than the gradient of the signal. Let's try a 100 by 100 tile size:
+than the gradient of the signal. Let's try a tile size of 75 by 75 pixels:
 
 @example
-$ astnoisechisel r.fits -h0 --tilesize=100,100 --checkqthresh
+$ astnoisechisel r.fits -h0 --tilesize=75,75 --checkqthresh
 @end example
 
 You can clearly see the effect of this increased tile size: the tiles are
-much larger (@mymath{\times4} in area) and when you look into
-@code{VALUE1_NO_OUTLIER}, you see that almost all the tiles under the
-galaxy have been discarded and we are only left with tiles in the
-right-most part of the image. The next group of extensions (those ending
-with @code{_INTERP}), give a value to all blank tiles based on the nearest
-tiles with a measurement. The following group of extensions (ending with
-@code{_SMOOTH}) have smoothed the interpolated image to avoid sharp cuts on
-tile edges.
-
-Inspecting @code{THRESH1_SMOOTH}, you can see that there is no longer any
-significant gradient and no major signature of NGC 5195 exists. But before
-finishing the quantile threshold, let's have a closer look at the final
-extension (@code{QTHRESH-APPLIED}) which is thresholded image. Slide the
-dynamic range in your FITS viewer so 0 valued pixels are black and all
-non-zero pixels are white. You will see that the black holes are not evenly
-distributed. Those that follow the tail of NGC 5195 are systematically
-smaller than those in the far-right of the image. This suggests that we can
-decrease the quantile threshold (@code{--qthresh}) even further: there is
-still signal down there!
+much larger and when you look into @code{VALUE1_NO_OUTLIER}, you see that
+almost all the previous tiles under the galaxy have been discarded and we
+only have a few tiles on the edge with a gradient. So let's define a more
+strict condition to keep tiles:
+
+@example
+$ astnoisechisel r.fits -h0 --tilesize=75,75 --meanmedqdiff=0.001 \
+                 --checkqthresh
+@end example
+
+After constraining @code{--meanmedqdiff}, NoiseChisel stopped with a
+different error. Please read it: at the start, it says that only 6 tiles
+passed the constraint while you have asked for 9. The @file{r_qthresh.fits}
+image also only has 8 extensions (not the original 15). Take a look at the
+initially selected tiles and those after outlier rejection. You can see the
+place of the tiles that passed. They seem to be in a good place (very far
+away from the M51 group and its tidal feature). Using the 6 nearest
+neighbors is also not too bad. So let's decrease the number of neighboring
+tiles for interpolation so NoiseChisel can continue:
+
+@example
+$ astnoisechisel r.fits -h0 --tilesize=75,75 --meanmedqdiff=0.001 \
+                 --interpnumngb=6 --checkqthresh
+@end example
+
+The next group of extensions (those ending with @code{_INTERP}), give a
+value to all blank tiles based on the nearest tiles with a measurement. The
+following group of extensions (ending with @code{_SMOOTH}) have smoothed
+the interpolated image to avoid sharp cuts on tile edges. Inspecting
+@code{THRESH1_SMOOTH}, you can see that there is no longer any significant
+gradient and no major signature of NGC 5195 exists.
+
+We can now remove @option{--checkqthresh} and let NoiseChisel proceed with
+its detection. Also, similar to the argument in @ref{NoiseChisel
+optimization for detection}, in the command above, we set the
+pseudo-detection signal-to-noise ratio quantile (@option{--snquant}) to
+0.95.
 
 @example
 $ rm r_qthresh.fits
-$ astnoisechisel r.fits -h0 --tilesize=100,100 --qthresh=0.2
+$ astnoisechisel r.fits -h0 --tilesize=75,75 --meanmedqdiff=0.001 \
+                 --interpnumngb=6 --snquant=0.95
 @end example
 
-Since the quantile threshold of the previous command was satisfactory, we
-finally removed @option{--checkqthresh} to let NoiseChisel proceed until
-completion. Looking at the @code{DETECTIONS} extension of NoiseChisel's
-output, we see the right-ward edges in particular have many holes that are
-fully surrounded by signal and the signal stretches out in the noise very
-thinly. This suggests that there is still signal that can be detected. You
-can confirm this guess by looking at the @code{SKY} extension to see that
-indeed, we still have traces of the galaxy outskirts there. Therefore, we
-should dig deeper into the noise.
+Looking at the @code{DETECTIONS} extension of NoiseChisel's output, we see
+the right-ward edges in particular have many holes that are fully
+surrounded by signal and the signal stretches out in the noise very thinly
+(the size of the holes increases as we go out). This suggests that there is
+still signal that can be detected. You can confirm this guess by looking at
+the @code{SKY} extension to see that indeed, there is a clear footprint of
+the M51 group in the Sky image (which is not good!). Therefore, we should
+dig deeper into the noise.
 
-Let's decrease the growth quantile (for larger/deeper growth into the
-noise, with @option{--detgrowquant}) and increase the size of holes that
-can be filled (if they are fully surrounded by signal, with
-@option{--detgrowmaxholesize}).
+With the @option{--detgrowquant} option, NoiseChisel will use the
+detections as seeds and grow them into the noise. Its value is the
+ultimate limit of the growth in units of quantile (between 0 and
+1). Therefore @option{--detgrowquant=1} means no growth and
+@option{--detgrowquant=0.5} means an ultimate limit of the Sky level (which
+is usually too much!). Try running the previous command with various values
+(from 0.6 to higher values) to see this option's effect. For this
+particularly huge galaxy (with signal that extends very gradually into the
+noise), we'll set it to @option{0.65}:
 
 @example
-$ astnoisechisel r.fits -h0 --tilesize=100,100 --qthresh=0.2          \
-                 --detgrowquant=0.65 --detgrowmaxholesize=10000
+$ astnoisechisel r.fits -h0 --tilesize=75,75 --meanmedqdiff=0.001 \
+                 --interpnumngb=6 --snquant=0.95 --detgrowquant=0.65
 @end example
 
-Looking into the output, we now clearly see that the tidal features of M51
-and NGC 5195 are detected nicely in the same direction as expected (towards
-the bottom right side of the image). However, as discussed above, the best
-measure of good detection is the noise, not the detections themselves. So
-let's look at the Sky and its Standard deviation. The Sky still has a very
-faint shadow of the galaxy outskirts (the values on the left are very
-slightly larger than those on the right).
+Beyond this level (smaller @option{--detgrowquant} values), you see the
+smaller background galaxies starting to create thin spider-leg-like
+features, showing that we are following correlated noise for too much.
 
-Let's calculate this gradient as a function of noise. First we'll collapse
-the image along the second (vertical) FITS dimension to have a 1D
-array. Then we'll calculate the top and bottom values of this array and
-define that as the gradient. Then we'll estimate the mean standard
-deviation over the image and divide it by the first value. The first two
-commands are just for demonstration of the collapsed dataset:
+Now, when you look at the @code{DETECTIONS} extension, you see the wings of
+the galaxy being detected much farther out, But you also see many holes
+which are clearly just caused by noise. After growing the objects,
+NoiseChisel also allows you to fill such holes when they are smaller than a
+certain size through the @option{--detgrowmaxholesize} option. In this
+case, a maximum area/size of 10,000 pixels seems to be good:
+
+@example
+$ astnoisechisel r.fits -h0 --tilesize=75,75 --meanmedqdiff=0.001    \
+                 --interpnumngb=6 --snquant=0.95 --detgrowquant=0.65 \
+                 --detgrowmaxholesize=10000
+@end example
+
+The detection looks good now, but when you look into the @code{SKY}
+extension, you still clearly see a footprint of the galaxy. We'll
+leave it as an exercise for you to play with NoiseChisel further and
+improve the detected pixels.
+
+So, we'll just stop with one last tool NoiseChisel gives you to get a
+slightly better estimation of the Sky: @option{--minskyfrac}. On each tile,
+NoiseChisel will only measure the Sky-level if the fraction of undetected
+pixels is larger than the value given to this option. To avoid the edges of
+the galaxy, we'll set it to @option{0.9}. Therefore, tiles that are covered
+by detected pixels for more than @mymath{10\%} of their area are ignored.
+
+@example
+$ astnoisechisel r.fits -h0 --tilesize=75,75 --meanmedqdiff=0.001    \
+                 --interpnumngb=6 --snquant=0.95 --detgrowquant=0.65 \
+                 --detgrowmaxholesize=10000 --minskyfrac=0.9
+@end example
+
+The footprint of the galaxy still exists in the @code{SKY} extension, but
+it has decreased in significance now. Let's calculate the significance of
+the undetected gradient, in units of noise. Since the gradient is roughly
+along the horizontal axis, we'll collapse the image along the second
+(vertical) FITS dimension to have a 1D array (a table column, see its
+values with the second command).
 
 @example
 $ astarithmetic r_detected.fits 2 collapse-mean -hSKY -ocollapsed.fits
 $ asttable collapsed.fits
-$ skydiff=$(astarithmetic r_detected.fits 2 collapse-mean set-i   \
-                          i maxvalue i minvalue - -hSKY -q)
-$ echo $skydiff
+@end example
+
+We can now calculate the minimum and maximum values of this array and
+define their difference (in units of noise) as the gradient:
+
+@example
+$ grad=$(astarithmetic r_detected.fits 2 collapse-mean set-i   \
+                       i maxvalue i minvalue - -hSKY -q)
+$ echo $grad
 $ std=$(aststatistics r_detected.fits -hSKY_STD --mean)
 $ echo $std
-$ echo "$std $skydiff" | awk '@{print $1/$2@}'
-@end example
+$ astarithmetic -q $grad $std /
+@end example
+
+The undetected gradient (@code{grad} above) is thus roughly a quarter of
+the noise. But don't forget that this is per-pixel: individually it's small,
+but it extends over millions of pixels, so the total flux may still be
+relevant.
+
+When looking at the raw input shallow image, you don't see anything so far
+out of the galaxy. You might just think that ``this is all noise, I have
+just dug too deep and I'm following systematics''! If you feel like this,
+have a look at the deep images of this system in
+@url{https://arxiv.org/abs/1501.04599, Watkins et al. [2015]}, or a 12 hour
+deep image of this system (with a 12-inch telescope):
+@url{https://i.redd.it/jfqgpqg0hfk11.jpg}@footnote{The image is taken from
+this Reddit discussion:
+@url{https://www.reddit.com/r/Astronomy/comments/9d6x0q/12_hours_of_exposure_on_the_whirlpool_galaxy/}}.
 In
+these deeper images you see that the outer edges of the M51 group clearly
+follow this exact structure. Below, in @ref{Achieved surface brightness
+level}, we'll measure the exact level.
+
+As the gradient in the @code{SKY} extension shows, and the deep images
+cited above confirm, the galaxy's signal extends even beyond this. But this
+is already far deeper than what most (if not all) other tools can detect.
+Therefore, we'll stop configuring NoiseChisel at this point in the tutorial
+and let you play with it a little more while reading more about it in
+@ref{NoiseChisel}.
+
+After finishing this tutorial please go through the NoiseChisel paper and
+its options and play with them to further decrease the gradient. This will
+greatly help you get a good feeling of the options. When you do find a
+better configuration, please send it to us and we'll mention your name here
+with your suggested configuration. Don't forget that good data analysis is
+an art, so like a sculptor, master your chisel for a good result.
 
-This gradient in the Sky (output of first command below) is much less (by
-more than 20 times) than the standard deviation (final extension). So we
-can stop configuring NoiseChisel at this point in the tutorial. We leave
-further configuration for a more accurate detection to you as an exercise.
+@cartouche
+@noindent
+@strong{This NoiseChisel configuration is NOT GENERIC:} Don't use this
+configuration blindly on another image. As you saw above, the reason we
+chose this particular configuration for NoiseChisel to detect the wings of
+the M51 group was strongly influenced by the noise properties of this
+particular image. So as long as your image noise has similar properties
+(from the same data-reduction step of the same database), you can use this
+configuration on any image. For images from other instruments, or
+higher-level/reduced SDSS products, please follow a similar logic to what
+was presented here and find the best configuration yourself.
+@end cartouche
+
+@cartouche
+@noindent
+@strong{Smart NoiseChisel:} As you saw during this section, there is a
+clear logic behind the optimal parameter value for each dataset. Therefore,
+we plan to add capabilities to (optionally) automate some of the choices made
+here based on the actual dataset, please join us in doing this if you are
+interested. However, given the many problems in existing ``smart''
+solutions, such automatic changing of the configuration may cause more
+problems than they solve. So even when they are implemented, we would
+strongly recommend quality checks for a robust analysis.
+@end cartouche
 
-In this shallow image, this extent may seem too far deep into the noise for
-visual confirmation. Therefore, if the statistical argument above, to
-justify the reality of this extended structure, hasn't convinced you, see
-the deep images of this system in @url{https://arxiv.org/abs/1501.04599,
-Watkins et al. [2015]}, or a 12 hour deep image of this system (with a
-12-inch telescope): @url{https://i.redd.it/jfqgpqg0hfk11.jpg}@footnote{The
-image is taken from this Reddit discussion:
-@url{https://www.reddit.com/r/Astronomy/comments/9d6x0q/12_hours_of_exposure_on_the_whirlpool_galaxy/}}.
+@node Achieved surface brightness level,  , NoiseChisel optimization, 
Detecting large extended targets
+@subsection Achieved surface brightness level
+In @ref{NoiseChisel optimization} we showed how to customize NoiseChisel
+for a single-exposure SDSS image of the M51 group. Let's measure how deep
+we carved the signal out of noise. For this measurement, we'll need to
+estimate the average flux on the outer edges of the detection. Fortunately
+all this can be done with a few simple commands (and no higher-level
+language mini-environments like Python or IRAF) using @ref{Arithmetic} and
+@ref{MakeCatalog}.
 
-Now that we know this detection is real, let's measure how deep we carved
-the signal out of noise. For this measurement, we'll need to estimate the
-average flux on the outer edges of the detection. Fortunately all this can
-be done with a few simple commands (and no higher-level language
-mini-environments) using @ref{Arithmetic} and @ref{MakeCatalog}. First,
-let's give a separate label to all the connected pixels of NoiseChisel's
-detection map:
+@cindex Opening
+First, let's separate each detected region, or give a unique label/counter
+to all the connected pixels of NoiseChisel's detection map:
 
 @example
-$ astarithmetic r_detected.fits 2 connected-components -hDETECTIONS \
-                -olabeled.fits
+$ det="r_detected.fits -hDETECTIONS"
+$ astarithmetic $det 2 connected-components -olabeled.fits
 @end example
 
-Of course, you can find the the label of the main galaxy visually, but to
-have a little more fun, lets do this automatically. The M51 group detection
-is by far the largest detection in this image. We can thus easily find the
+You can find the label of the main galaxy visually (by opening the
+image and hovering your mouse over the M51 group's label). But to have a
+little more fun, let's do this automatically. The M51 group detection is by
+far the largest detection in this image; this allows us to find the
 ID/label that corresponds to it. We'll first run MakeCatalog to find the
 area of all the detections, then we'll use AWK to find the ID of the
 largest object and keep it as a shell variable (@code{id}):
@@ -4338,36 +4734,43 @@ $ echo $id
 @end example
 
 To separate the outer edges of the detections, we'll need to ``erode'' the
-detections. We'll erode two times (one time would be too thin for such a
-huge object), using a maximum connectivity of 2 (8-connected
+M51 group detection. We'll erode three times (to have more pixels and thus
+less scatter), using a maximum connectivity of 2 (8-connected
 neighbors). We'll then save the output in @file{eroded.fits}.
 
 @example
-$ astarithmetic r_detected.fits 0 gt 2 erode -hDETECTIONS -oeroded.fits
+$ astarithmetic labeled.fits $id eq 2 erode 2 erode 2 erode \
+                -oeroded.fits
 @end example
 
 @noindent
-We should now just keep the pixels that have the ID of the M51 group, but a
-value of 0 in @file{erode.fits}. We'll keep the output in
-@file{boundary.fits}.
+In @file{labeled.fits}, we can now set all the 1-valued pixels of
+@file{eroded.fits} to 0 using Arithmetic's @code{where} operator added to
+the previous command. We'll need the pixels of the M51 group in
+@code{labeled.fits} two times: once to do the erosion, another time to find
+the outer pixel layer. To do this (and be efficient and more readable)
+we'll use the @code{set-i} operator. In the command below, it will
+save/set/name the pixels of the M51 group as the `@code{i}'. In this way we
+can use it any number of times afterwards, while only reading it from disk
+and finding M51's pixels once.
 
 @example
-$ astarithmetic labeled.fits $id eq eroded.fits 0 eq and          \
-                -g1 -oboundary.fits
+$ astarithmetic labeled.fits $id eq set-i i \
+                i 2 erode 2 erode 2 erode 0 where -oedge.fits
 @end example
 
 Open the image and have a look. You'll see that the detected edge of the
-M51 group is now clearly visible. You can use @file{boundary.fits} to mark
+M51 group is now clearly visible. You can use @file{edge.fits} to mark
 (set to blank) this boundary on the input image and get a visual feeling of
 how far it extends:
 
 @example
-$ astarithmetic r.fits boundary.fits nan where -ob-masked.fits -h0
+$ astarithmetic r.fits edge.fits nan where -ob-masked.fits -h0
 @end example
 
 To quantify how deep we have detected the low-surface brightness regions,
 we'll use the command below. In short it just divides all the non-zero
-pixels of @file{boundary.fits} in the Sky subtracted input (first extension
+pixels of @file{edge.fits} in the Sky subtracted input (first extension
 of NoiseChisel's output) by the pixel standard deviation of the same
 pixel. This will give us a signal-to-noise ratio image. The mean value of
 this image shows the level of surface brightness that we have achieved.
@@ -4376,7 +4779,7 @@ You can also break the command below into multiple calls 
to Arithmetic and
 create temporary files to understand it better. However, if you have a look
 at @ref{Reverse polish notation} and @ref{Arithmetic operators}, you should
 be able to easily understand what your computer does when you run this
-command@footnote{@file{boundary.fits} (extension @code{1}) is a binary (0
+command@footnote{@file{edge.fits} (extension @code{1}) is a binary (0
 or 1 valued) image. Applying the @code{not} operator on it, just flips all
 its pixels. Through the @code{where} operator, we are setting all the newly
 1-valued pixels in @file{r_detected.fits} (extension @code{INPUT-NO-SKY})
@@ -4387,38 +4790,37 @@ the @code{meanvalue} operator, we are taking the mean 
value of all the
 non-blank pixels and reporting that as a single number.}.
 
 @example
-$ astarithmetic r_detected.fits boundary.fits not nan where \
-                r_detected.fits /                           \
-                meanvalue                                   \
-                -hINPUT-NO-SKY -h1 -hSKY_STD --quiet
+$ edge="edge.fits -h1"
+$ skystd="r_detected.fits -hSKY_STD"
+$ skysub="r_detected.fits -hINPUT-NO-SKY"
+$ astarithmetic $skysub $skystd / $edge not nan where       \
+                meanvalue --quiet
 @end example
 
-@noindent
-The outer wings where therefore non-parametrically detected until
-@mymath{\rm{S/N}\approx0.05}!
-
 @cindex Surface brightness
-This is very good! But the signal-to-noise ratio is a relative measurement.
-Let's also measure the depth of our detection in absolute surface
-brightness units; or magnitudes per square arcseconds. To find out, we'll
-first need to calculate how many pixels of this image are in one
+We have thus detected the wings of the M51 group down to roughly 1/4th of
+the noise level in this image! But the signal-to-noise ratio is a relative
+measurement. Let's also measure the depth of our detection in absolute
+surface brightness units; or magnitudes per square arcseconds. To find out,
+we'll first need to calculate how many pixels of this image are in one
 arcsecond-squared. Fortunately the world coordinate system (or WCS) meta
 data of Gnuastro's output FITS files (in particular the @code{CDELT}
 keywords) give us this information.
 
 @example
-$ n=$(astfits r_detected.fits -h1                                     \
-              | awk '/CDELT1/ @{p=1/($3*3600); print p*p@}')
-$ echo $n
+$ pixscale=$(astfits r_detected.fits -h1                           \
+                    | awk '/CDELT1/ @{p=1/($3*3600); print p*p@}')
+$ echo $pixscale
 @end example
 
 @noindent
-Now, let's calculate the average sky-subtracted flux in the border region
-per pixel.
+Note that we multiplied the value by 3600 so we work in units of
+arc-seconds not degrees. Now, let's calculate the average sky-subtracted
+flux in the border region per pixel.
 
 @example
-$ f=$(astarithmetic r_detected.fits boundary.fits not nan where set-i \
-                    i sumvalue i numvalue / -q -hINPUT-NO-SKY)
+$ f=$(astarithmetic r_detected.fits edge.fits not nan where set-i \
+                    i sumvalue i numbervalue / -q -hINPUT-NO-SKY)
 $ echo $f
 @end example
 
@@ -4435,12 +4837,12 @@ correct for this.
 
 @example
 $ z=24.80
-$ echo "$n $f $z" | awk '@{print -2.5*log($1*$2)/log(10)+$3@}'
---> 30.0646
+$ echo "$pixscale $f $z" | awk '@{print -2.5*log($1*$2)/log(10)+$3@}'
+--> 28.2989
 @end example
 
-This shows that on a single-exposure SDSS image, we have reached a surface
-brightness limit of roughly 30 magnitudes per arcseconds squared!
+On a single-exposure SDSS image, we have reached a surface brightness limit
+fainter than 28 magnitudes per arcseconds squared!
 
 In interpreting this value, you should just have in mind that NoiseChisel
 works based on the contiguity of signal in the pixels. Therefore the larger
@@ -4451,29 +4853,10 @@ in this image was larger/smaller than this, or if the 
image was
 larger/smaller, or if we had used a different configuration, we would go
 deeper/shallower.
 
-@cartouche
-@noindent
-@strong{The NoiseChisel configuration found here is NOT GENERIC for any
-large object:} As you saw above, the reason we chose this particular
-configuration for NoiseChisel to detect the wings of the M51 group was
-strongly influenced by this particular object in this particular
-image. When low surface brightness signal takes over such a large fraction
-of your dataset (and you want to accurately detect/account for it), to make
-sure that it is successfully detected, you will need some manual checking,
-intervention, or customization. In other words, to make sure that your
-noise measurements are least affected by the signal@footnote{In the future,
-we may add capabilities to optionally automate some of the choices made
-here, please join us in doing this if you are interested. However, given
-the many problems in existing ``smart'' solutions, such automatic changing
-of the configuration may cause more problems than they solve. So even when
-they are implemented, we would strongly recommend manual checks and
-intervention for a robust analysis.}.
-@end cartouche
-
 To avoid typing all these options every time you run NoiseChisel on this
 image, you can use Gnuastro's configuration files, see @ref{Configuration
-files}. For an applied example of setting/using them, see @ref{General
-program usage tutorial}.
+files}. For an applied example of setting/using them, see @ref{Option
+management and configuration files}.
 
 To continue your analysis of such datasets with extended emission, you can
 use @ref{Segment} to identify all the ``clumps'' over the diffuse regions:
@@ -4610,13 +4993,14 @@ reading of the command, we'll define the shell variable 
@code{i} for the
 image name and save the output in @file{masked.fits}.
 
 @example
-$ i=r_detected_segmented.fits
-$ astarithmetic $i $i 0 gt nan where -hINPUT -hCLUMPS -omasked.fits
+$ in="r_detected_segmented.fits -hINPUT"
+$ clumps="r_detected_segmented.fits -hCLUMPS"
+$ astarithmetic $in $clumps 0 gt nan where -oclumps-masked.fits
 @end example
 
-Inspecting @file{masked.fits}, you can see some very diffuse peaks that
-have been missed, especially as you go farther away from the group center
-and into the diffuse wings. This is due to the fact that with this
+Inspecting @file{clumps-masked.fits}, you can see some very diffuse peaks
+that have been missed, especially as you go farther away from the group
+center and into the diffuse wings. This is due to the fact that with this
 configuration, we have focused more on the sharper clumps. To put the focus
 more on diffuse clumps, you can use a wider convolution kernel. Using a
 larger kernel can also help in detecting the existing clumps to fainter
@@ -4629,282 +5013,24 @@ smaller peaks on the wings. Please continue playing 
with Segment's
 configuration to obtain a more complete result (while keeping reasonable
 purity). We'll finish the discussion on finding true clumps at this point.
 
-The properties of the background objects can then easily be measured using
-@ref{MakeCatalog}. To measure the properties of the background objects
-(detected as clumps over the diffuse region), you shouldn't mask the
-diffuse region. When measuring clump properties with @ref{MakeCatalog}, the
-ambient flux (from the diffuse region) is calculated and subtracted. If the
-diffuse region is masked, its effect on the clump brightness cannot be
-calculated and subtracted.
-
-To keep this tutorial short, we'll stop here. See @ref{General program
-usage tutorial} and @ref{Segment} for more on using Segment, producing
+The properties of the clumps within M51, or the background objects can then
+easily be measured using @ref{MakeCatalog}. To measure the properties of
+the background objects (detected as clumps over the diffuse region), you
+shouldn't mask the diffuse region. When measuring clump properties with
+@ref{MakeCatalog} and using the @option{--clumpscat}, the ambient flux
+(from the diffuse region) is calculated and subtracted. If the diffuse
+region is masked, its effect on the clump brightness cannot be calculated
+and subtracted.
+
+To keep this tutorial short, we'll stop here. See @ref{Segmentation and
+making a catalog} and @ref{Segment} for more on using Segment, producing
 catalogs with MakeCatalog and using those catalogs.
 
-Finally, if this book or any of the programs in Gnuastro have been useful
-for your research, please cite the respective papers, and acknowledge the
-funding agencies that made all of this possible. All Gnuastro programs have
-a @option{--cite} option to facilitate the citation and
-acknowledgment. Just note that it may be necessary to cite additional
-papers for different programs, so please try it out on all the programs
-that you used, for example:
-
-@example
-$ astmkcatalog --cite
-$ astnoisechisel --cite
-@end example
 
 
 
 
-@node Hubble visually checks and classifies his catalog,  , Detecting large 
extended targets, Tutorials
-@section Hubble visually checks and classifies his catalog
 
-@cindex Edwin Hubble
-In 1924 Hubble@footnote{Edwin Powell Hubble (1889 -- 1953 A.D.) was an
-American astronomer who can be considered as the father of
-extra-galactic astronomy, by proving that some nebulae are too distant
-to be within the Galaxy. He then went on to show that the universe
-appears to expand and also done a visual classification of the
-galaxies that is known as the Hubble fork.} announced his discovery
-that some of the known nebulous objects are too distant to be within
-the the Milky Way (or Galaxy) and that they were probably distant
-Galaxies@footnote{Note that at that time, ``Galaxy'' was a proper noun
-used to refer to the Milky way. The concept of a galaxy as we define
-it today had not yet become common. Hubble played a major role in
-creating today's concept of a galaxy.} in their own right. He had also
-used them to show that the redshift of the nebulae increases with
-their distance. So now he wants to study them more accurately to see
-what they actually are. Since they are nebulous or amorphous, they
-can't be modeled (like stars that are always a point) easily. So there
-is no better way to distinguish them than to visually inspect them and
-see if it is possible to classify these nebulae or not.
-
-@cartouche
-@noindent
-@strong{No default dataset in this tutorial:} Unfortunately there is no
-input dataset for this tutorial. We will add a dataset and corresponding
-table to smoothly run this tutorial later. But until then, you can use the
-final catalog that you produced in @ref{General program usage
-tutorial}. The start of this tutorial has some overlaps with the ending of
-@ref{General program usage tutorial}. You can just select some of the
-brighter sources to make it shorter and more manageable.
-@end cartouche
-
-Hubble has stored all the FITS images of the objects he wants to visually
-inspect in his @file{/mnt/data/images} directory. He has also stored his
-catalog of extra-galactic nebulae in
-@file{/mnt/data/catalogs/extragalactic.txt}. Any normal user on his
-GNU/Linux system (including himself) only has read access to the contents
-of the @file{/mnt/data} directory. He has done this by running this command
-as root:
-
-@example
-# chmod -R 755 /mnt/data
-@end example
-
-@noindent
-Hubble has done this intentionally to avoid mistakenly deleting or
-modifying the valuable images he has taken at Mount Wilson while he is
-working as an ordinary user. Retaking all those images and data is
-simply not an option. In fact they are also in another hard disk
-(@file{/dev/sdb1}). So if the hard disk which stores his GNU/Linux
-distribution suddenly malfunctions due to work load, his data is not
-in harms way. That hard disk is only mounted to this directory when he
-wants to use it with the command:
-
-@example
-# mount /dev/sdb1 /mnt/data
-@end example
-
-@noindent
-In short, Hubble wants to keep his data safe and fortunately by
-default Gnuastro allows for this.  Hubble creates a temporary
-@file{visualcheck} directory in his home directory for this check. He
-runs the following commands to make the directory and change to
-it@footnote{The @code{pwd} command is short for ``Print Working
-Directory'' and @code{ls} is short for ``list'' which shows the
-contents of a directory.}:
-
-@example
-$ mkdir ~/visualcheck
-$ cd ~/visualcheck
-$ pwd
-/home/edwin/visualcheck
-$ ls
-@end example
-
-Hubble has multiple images in @file{/mnt/data/images}, some of his targets
-might be on the edges of an image and so several images need to be stitched
-to give a good view of them. Also his extra-galactic targets belong to
-various pointings in the sky, so they are not in one large
-image. Gnuastro's Crop is just the program he wants. The catalog in
-@file{extragalactic.txt} is a plain text file which stores the basic
-information of all his known 200 extra-galactic nebulae. If you don't have
-any particular catalog and accompanying image, you can use one the Hubble
-Space Telescope F160W catalog that we produced in @ref{General program
-usage tutorial} along with the accompanying image (specify the exact image
-name, not @file{/mnt/data/images/*.fits}). You can select the brightest
-galaxies for an easier classification.
-
-@cindex WCS
-@cindex World coordinate system
-In its second column, the catalog has each object's Right Ascension (the
-first column is a label he has given to each object), and in the third, the
-object's declination (which he specifies with the @option{--coordcol}
-option). Also, since the coordinates are in the world coordinate system
-(WCS, not pixel positions) units, he adds @option{--mode=wcs}.
-
-@example
-$ astcrop --coordcol=2 --coordcol=3 /mnt/data/images/*.fits     \
-          --mode=wcs /mnt/data/catalogs/extragalactic.txt
-Crop started on Tue Jun  14 10:18:11 1932
-  ---- ./4_crop.fits                  1 1
-  ---- ./2_crop.fits                  1 1
-  ---- ./1_crop.fits                  1 1
-[[[ Truncated middle of list ]]]
-  ---- ./198_crop.fits                1 1
-  ---- ./195_crop.fits                1 1
-  - 200 images created.
-  - 200 were filled in the center.
-  - 0 used more than one input.
-Crop finished in:  2.429401 (seconds)
-@end example
-
-
-@cindex Asynchronous thread allocation
-@noindent
-Hubble already knows that thread allocation to the the CPU cores is
-asynchronous. Hence each time you run it, the order of which job gets done
-first differs. When using Crop the order of outputs is irrelevant since
-each crop is independent of the rest. This is why the crops are not
-necessarily created in the same input order. He is satisfied with the
-default width of the outputs (which he inspected by running @code{$ astcrop
--P}). If he wanted a different width for the cropped images, he could do
-that with the @option{--wwidth} option which accepts a value in
-arc-seconds.  When he lists the contents of the directory again he finds
-his 200 objects as separate FITS images.
-
-@example
-$ ls
-1_crop.fits 2_crop.fits ... 200_crop.fits
-@end example
-
-@cindex GNU Parallel
-The FITS image format was not designed for efficient/fast viewing, but
-mainly for accurate storing of the data. So he chooses to convert the
-cropped images to a more common image format to view them more quickly and
-easily through standard image viewers (which load much faster than FITS
-image viewer). JPEG is one of the most recognized image formats that is
-supported by most image viewers. Fortunately Gnuastro has just such a tool
-to convert various types of file types to and from each other:
-ConvertType. Hubble has already heard of GNU Parallel from one of his
-colleagues at Mount Wilson Observatory. It allows multiple instances of a
-command to be run simultaneously on the system, so he uses it in
-conjunction with ConvertType to convert all the images to JPEG.
-@example
-$ parallel astconvertt -ojpg ::: *_crop.fits
-@end example
-
-@pindex eog
-@cindex Eye of GNOME
-For his graphical user interface Hubble is using GNOME which is the default
-in most distributions in GNU/Linux. The basic image viewer in GNOME is the
-Eye of GNOME, which has the executable file name @command{eog}
-@footnote{Eye of GNOME is only available for users of the GNOME graphical
-desktop environment which is the default in most GNU/Linux
-distributions. If you use another graphical desktop environment, replace
-@command{eog} with any other image viewer.}. Since he has used it before,
-he knows that once it opens an image, he can use the @key{ENTER} or
-@key{SPACE} keys on the keyboard to go to the next image in the directory
-or the @key{Backspace} key to go the previous image. So he opens the image
-of the first object with the command below and with his cup of coffee in
-his other hand, he flips through his targets very fast to get a good
-initial impression of the morphologies of these extra-galactic nebulae.
-
-@example
-$ eog 1_crop.jpg
-@end example
-
-@cindex GNU Bash
-@cindex GNU Emacs
-@cindex Spiral galaxies
-@cindex Elliptical galaxies
-Hubble's cup of coffee is now finished and he also got a nice general
-impression of the shapes of the nebulae. He tentatively/mentally
-classified the objects into three classes while doing the visual
-inspection. One group of the nebulae have a very simple elliptical
-shape and seem to have no internal special structure, so he gives them
-code 1. Another clearly different class are those which have spiral
-arms which he associates with code 2 and finally there seems to be a
-class of nebulae in between which appear to have a disk but no spiral
-arms, he gives them code 3.
-
-Now he wants to know how many of the nebulae in his extra-galactic sample
-are within each class. Repeating the same process above and writing the
-results on paper is very time consuming and prone to errors. Fortunately
-Hubble knows the basics of GNU Bash shell programming, so he writes the
-following short script with a loop to help him with the job. After all,
-computers are made for us to operate and knowing basic shell programming
-gives Hubble this ability to creatively operate the computer as he
-wants. So using GNU Emacs@footnote{This can be done with any text editor}
-(his favorite text editor) he puts the following text in a file named
-@file{classify.sh}.
-
-@example
-for name in *.jpg
-do
-    eog $name &
-    processid=$!
-    echo -n "$name belongs to class: "
-    read class
-    echo $name $class >> classified.txt
-    kill $processid
-done
-@end example
-
-@cindex Gedit
-@cindex GNU Emacs
-Fortunately GNU Emacs or even simpler editors like Gedit (part of the
-GNOME graphical user interface) will display the variables and shell
-constructs in different colors which can really help in understanding
-the script. Put simply, the @code{for} loop gets the name of each JPEG
-file in the directory this script is run in and puts it in
-@code{name}. In the shell, the value of a variable is used by putting
-a @code{$} sign before the variable name. Then Eye of GNOME is run on
-the image in the background to show him that image and its process ID
-is saved internally (this is necessary to close Eye of GNOME
-later). The shell then prompts the user to specify a class and after
-saving it in @code{class}, it prints the file name and the given class
-in the next line of a file named @file{classified.txt}. To make the
-script executable (so he can run it later any time he wants) he runs:
-
-@example
-$ chmod +x classify.sh
-@end example
-
-@noindent
-Now he is ready to do the classification, so he runs the script:
-
-@example
-$ ./classify.sh
-@end example
-
-@noindent
-In the end he can delete all the JPEG and FITS files along with Crop's log
-file with the following short command. The only files remaining are the
-script and the result of the classification.
-
-@example
-$ rm *.jpg *.fits astcrop.txt
-$ ls
-classified.txt   classify.sh
-@end example
-
-@noindent
-He can now use @file{classified.txt} as input to a plotting program to
-plot the histogram of the classes and start making interpretations
-about what these nebulous objects that are outside of the Galaxy are.
 
 
 
@@ -5099,7 +5225,6 @@ $ ./testprog > testprog.lis
 $ diff testprog.lis testprog.out    # Should have no output
 $ cmp testprog.fit testprog.std     # Should have no output
 $ rm cookbook fitscopy imcopy smem speed testprog
-$ make shared
 $ sudo make install
 @end example
 
@@ -9276,8 +9401,8 @@ make a mock galaxy image, you need to feed in the 
properties of each galaxy
 into @ref{MakeProfiles} for it do the inverse of the process above and make
 a simulated image from a catalog, see @ref{Sufi simulates a detection}. In
 other cases, you can feed a table into @ref{Crop} and it will crop out
-regions centered on the positions within the table, see @ref{Hubble
-visually checks and classifies his catalog}. So to end this relatively long
+regions centered on the positions within the table, see @ref{Finding
+reddest clumps and visual inspection}. So to end this relatively long
 introduction, tables play a very important role in astronomy, or generally
 all branches of data analysis.
 
@@ -12068,8 +12193,8 @@ interpreted as the center of a crop with a width of 
@option{--width} pixels
 along each dimension. The columns can contain any floating point value. The
 value to @option{--output} option is seen as a directory which will host
 (the possibly multiple) separate crop files, see @ref{Crop output} for
-more. For a tutorial using this feature, please see @ref{Hubble visually
-checks and classifies his catalog}.
+more. For a tutorial using this feature, please see @ref{Finding reddest
+clumps and visual inspection}.
 
 @item Center of a single crop (on the command-line)
 The center of the crop is given on the command-line with the
@@ -12314,7 +12439,7 @@ When in catalog mode, Crop will run in parallel unless 
you set
 when multiple outputs are created with threads, the outputs will not be
 created in the same order. This is because the threads are asynchronous and
 thus not started in order. This has no effect on each output, see
-@ref{Hubble visually checks and classifies his catalog} for a tutorial on
+@ref{Finding reddest clumps and visual inspection} for a tutorial on
 effectively using this feature.
 
 @menu
@@ -19903,16 +20028,18 @@ The name or number (counting from zero) of the 
extension containing the
 Sky value as a single number, or the file name containing a dataset
 (different values per pixel or tile). The Sky dataset is only necessary
 when @option{--subtractsky} is called or when a column directly related to
-the Sky value is requested (currently @option{--sky}).
+the Sky value is requested (currently @option{--sky}). This dataset may be
+a tessellation, with one element per tile (see @option{--oneelempertile} of
+NoiseChisel's @ref{Processing options}).
 
-When the Sky dataset is necessary and this option is not called,
-MakeCatalog will assume it is a dataset first look into the
-@option{--valuesfile} (if it is given) and then the main input file (given
-as an argument). By default the values dataset is assumed to be already Sky
-subtracted, so this dataset is not necessary for many of the columns.
+When the Sky dataset is necessary but this option is not called,
+MakeCatalog will assume it is an HDU/extension (specified by
+@option{--skyhdu}) in one of the already given files. First it will look
+for it in the @option{--valuesfile} (if it is given) and then the main
+input file (given as an argument).
 
-This dataset may be tessellation, with one element per tile (see
-@option{--oneelempertile} of @ref{Processing options}).
+By default the values dataset is assumed to be already Sky subtracted, so
+this dataset is not necessary for many of the columns.
 
 @item --skyhdu=STR
 HDU/extension of the Sky dataset, see @option{--skyfile}.
@@ -29480,6 +29607,7 @@ both polygons have to be sorted in an anti-clock-wise 
manner.
 @node Qsort functions, Permutations, Polygons, Gnuastro library
 @subsection Qsort functions (@file{qsort.h})
 
+@cindex @code{qsort}
 When sorting a dataset is necessary, the C programming language provides
 the @code{qsort} (Quick sort) function. @code{qsort} is a generic function
 which allows you to sort any kind of data structure (not just a single
@@ -29487,6 +29615,14 @@ array of numbers). To define ``greater'' and 
``smaller'' (for sorting),
 @code{qsort} needs another function, even for simple numerical types. The
 functions introduced in this section are to be passed onto @code{qsort}.
 
+@cindex NaN
+Note that larger and smaller operators are not defined on NaN
+elements. Therefore, if the input array is a floating point type, and
+contains NaN values, the relevant functions of this section are going to
+put the NaN elements at the end of the list (after the sorted non-NaN
+elements), irrespective of the requested sorting order (increasing or
+decreasing).
+
 The first class of functions below (with @code{TYPE} in their names) can be
 used for sorting a simple numeric array. Just replace @code{TYPE} with the
 dataset's numeric datatype. The second set of functions can be used to sort
diff --git a/lib/Makefile.am b/lib/Makefile.am
index 1dda613..d71ccdf 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -43,7 +43,8 @@ AM_CPPFLAGS = -I\$(top_srcdir)/bootstrapped/lib            \
 # features. This also avoids the need for the programs to link separately
 # with Gnulib, they only need to link with the Gnuastro library.
 lib_LTLIBRARIES = libgnuastro.la
-libgnuastro_la_LDFLAGS = -version-info $(GAL_LT_VERSION)
+libgnuastro_la_LDFLAGS = -version-info $(GAL_LT_VERSION) $(CONFIG_LDADD) \
+                         -lc -no-undefined
 libgnuastro_la_LIBADD = $(top_builddir)/bootstrapped/lib/libgnu.la
 
 
diff --git a/lib/dimension.c b/lib/dimension.c
index 294d811..47a98f8 100644
--- a/lib/dimension.c
+++ b/lib/dimension.c
@@ -330,7 +330,7 @@ dimension_collapse_sanity_check(gal_data_t *in, gal_data_t 
*weight,
   /* The requested dimension to collapse cannot be larger than the input's
      number of dimensions. */
   if( c_dim > (in->ndim-1) )
-    error(EXIT_FAILURE, 0, "%s: the input has %zu dimensions, but you have "
+    error(EXIT_FAILURE, 0, "%s: the input has %zu dimension(s), but you have "
           "asked to collapse dimension %zu", __func__, in->ndim, c_dim);
 
   /* If there is no blank value, there is no point in calculating the
diff --git a/lib/qsort.c b/lib/qsort.c
index 3bba8c6..20aa253 100644
--- a/lib/qsort.c
+++ b/lib/qsort.c
@@ -22,6 +22,7 @@ along with Gnuastro. If not, see 
<http://www.gnu.org/licenses/>.
 **********************************************************************/
 #include <config.h>
 
+#include <math.h>
 #include <stdlib.h>
 #include <stdint.h>
 
@@ -30,6 +31,31 @@ along with Gnuastro. If not, see 
<http://www.gnu.org/licenses/>.
 #include <gnuastro/qsort.h>
 
 
+/*****************************************************************/
+/**********                  Macros               ****************/
+/*****************************************************************/
+/* When one or both elements are NaN, the simple comparison, like `(tb >
+   ta) - (tb < ta)', will give 0 (as if the elements are equal). However,
+   some preference has to be given to the NaN element in a comparison,
+   otherwise the output is not going to be reasonable. We also don't want
+   to check NaNs on every comparison (it will slow down the processing).
+
+   So we'll exploit the fact that when the comparison result doesn't
+   equal zero, we don't have any NaNs and this `COMPARE_FLOAT_POSTPROCESS'
+   macro is called only when the comparison gives zero. Being larger or
+   smaller isn't defined for NaNs, so we'll just put them at the end of the
+   sorted list, whether it is sorted in decreasing or increasing mode. */
+#define COMPARE_FLOAT_POSTPROCESS (                                     \
+   isnan(ta) && isnan(tb)                                               \
+   ? 0                                /* Both NaN, define as equal. */  \
+   /* One is NaN, one isn't. */                                         \
+   : ( isnan(ta)                                                        \
+       ? 1                         /* First is NaN, set as smaller. */  \
+       : ( isnan(tb)                                                    \
+           ? -1                    /* Second is NaN, set as larger. */  \
+           : 0 )                      /* None are NaN, set as equal.*/  \
+       )                                                                \
+)
 
 
 
@@ -120,7 +146,6 @@ gal_qsort_uint64_i(const void *a, const void *b)
   return ( *(uint64_t *)a - *(uint64_t *)b );
 }
 
-
 int
 gal_qsort_int64_d(const void *a, const void *b)
 {
@@ -138,7 +163,8 @@ gal_qsort_float32_d(const void *a, const void *b)
 {
   float ta=*(float*)a;
   float tb=*(float*)b;
-  return (tb > ta) - (tb < ta);
+  int out=(tb > ta) - (tb < ta);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }
 
 int
@@ -146,7 +172,8 @@ gal_qsort_float32_i(const void *a, const void *b)
 {
   float ta=*(float*)a;
   float tb=*(float*)b;
-  return (ta > tb) - (ta < tb);
+  int out=(ta > tb) - (ta < tb);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }
 
 int
@@ -154,7 +181,8 @@ gal_qsort_float64_d(const void *a, const void *b)
 {
   double ta=*(double*)a;
   double tb=*(double*)b;
-  return (tb > ta) - (tb < ta);
+  int out=(tb > ta) - (tb < ta);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }
 
 int
@@ -162,7 +190,8 @@ gal_qsort_float64_i(const void *a, const void *b)
 {
   double ta=*(double*)a;
   double tb=*(double*)b;
-  return (ta > tb) - (ta < tb);
+  int out=(ta > tb) - (ta < tb);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }
 
 
@@ -323,7 +352,8 @@ gal_qsort_index_single_float32_d(const void *a, const void 
*b)
 {
   float ta=((float *)(gal_qsort_index_single))[ *(size_t *)a ];
   float tb=((float *)(gal_qsort_index_single))[ *(size_t *)b ];
-  return (tb > ta) - (tb < ta);
+  int out=(tb > ta) - (tb < ta);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }
 
 int
@@ -331,7 +361,8 @@ gal_qsort_index_single_float32_i(const void *a, const void 
*b)
 {
   float ta=((float *)(gal_qsort_index_single))[ *(size_t *)a ];
   float tb=((float *)(gal_qsort_index_single))[ *(size_t *)b ];
-  return (ta > tb) - (ta < tb);
+  int out=(ta > tb) - (ta < tb);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }
 
 int
@@ -339,7 +370,8 @@ gal_qsort_index_single_float64_d(const void *a, const void 
*b)
 {
   double ta=((double *)(gal_qsort_index_single))[ *(size_t *)a ];
   double tb=((double *)(gal_qsort_index_single))[ *(size_t *)b ];
-  return (tb > ta) - (tb < ta);
+  int out=(tb > ta) - (tb < ta);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }
 
 int
@@ -347,7 +379,8 @@ gal_qsort_index_single_float64_i(const void *a, const void 
*b)
 {
   double ta=((double *)(gal_qsort_index_single))[ *(size_t *)a ];
   double tb=((double *)(gal_qsort_index_single))[ *(size_t *)b ];
-  return (ta > tb) - (ta < tb);
+  int out=(ta > tb) - (ta < tb);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }
 
 int
@@ -362,7 +395,8 @@ gal_qsort_index_multi_d(const void *a, const void *b)
   float tb=B->values[ B->index ];
 
   /* Return the result. */
-  return (tb > ta) - (tb < ta);
+  int out=(tb > ta) - (tb < ta);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }
 
 int
@@ -377,5 +411,6 @@ gal_qsort_index_multi_i(const void *a, const void *b)
   float tb=B->values[ B->index ];
 
   /* Return the result. */
-  return (ta > tb) - (ta < tb);
+  int out=(ta > tb) - (ta < tb);
+  return out ? out : COMPARE_FLOAT_POSTPROCESS;
 }



reply via email to

[Prev in Thread] Current Thread [Next in Thread]