Module stikpetP.other.poho_dunn

Source code
import pandas as pd
from statistics import NormalDist
from ..other.table_cross import tab_cross

def ph_dunn(catField, ordField, categories=None, levels=None):
    '''
    Post-Hoc Dunn Test
    ------------------
    This can be used as a post-hoc test for a Kruskal-Wallis test (see ts_kruskal_wallis()).
    
    The test compares the mean ranks of each possible pair of categories from the catField. The null hypothesis is that these mean ranks are equal. A simple Bonferroni adjustment is made for the multiple testing.
    
    Dunn (1964) describes two procedures. The first is his own, the second is one from Steel (1960) for comparison. The difference is that in Dunn's procedure the mean rank of each category is based on the scores of all categories, including those that are not being compared, while Steel's procedure re-calculates the mean rank for each category using only the scores from the two categories being compared. This latter approach makes it very similar to a pairwise Mann-Whitney U test (see ph_mann_whitney()).
    
    Other post-hoc tests that could be considered are Nemenyi, Steel-Dwass, Conover, a pairwise Mann-Whitney U, or pairwise Mood-Median.
    
    Parameters
    ----------
    catField : pandas series
        data with categories
    ordField : pandas series
        data with the scores
    categories : list or dictionary, optional
        the categories to use from catField
    levels : list or dictionary, optional
        the levels or order used in ordField.
        
    Returns
    -------
    A dataframe with:
    
    * *cat. 1*, one of the two categories being compared
    * *cat. 2*, second of the two categories being compared
    * *n1*, number of cat. 1 cases in comparison
    * *n2*, number of cat. 2 cases in comparison
    * *mean rank 1*, mean rank of cases in cat. 1, based on all cases (incl. categories not in comparison)
    * *mean rank 2*, mean rank of cases in cat. 2, based on all cases (incl. categories not in comparison)
    * *statistic*, the z-value of the test
    * *p-value*, the p-value (significance)
    * *adj. p-value*, the Bonferroni adjusted p-value
    
    Notes
    -----
    The formula used (Dunn, 1964, p. 249):
    $$z_{1,2} = \\frac{\\bar{r}_1 - \\bar{r}_2}{\\sqrt{\\sigma_m^2}}$$
    $$sig. = 2\\times\\left(1 - \\Phi\\left(\\left|z\\right|\\right)\\right)$$
    
    With:
    $$\\sigma_m^2 = \\left(\\frac{n\\times\\left(n+1\\right)}{12} - \\frac{T}{12\\times\\left(n - 1\\right)}\\right)\\times\\left(\\frac{1}{n_1} + \\frac{1}{n_2}\\right)$$
    $$T = \\sum_{j} \\left(t_j^3 - t_j\\right)$$
    $$\\bar{r}_i = \\frac{R_i}{n_i}$$
    $$R_i = \\sum_{j=1}^{n_i} r_{i,j}$$
    
    *Symbols used*
    
    * \\(k\\), the number of categories
    * \\(t_j\\), the frequency of the j-th unique rank.
    * \\(n_i\\), the number of scores in category i
    * \\(r_{i,j}\\), the rank of the j-th score in category i using all original scores (incl. those not in the comparison).
    * \\(R_i\\), the sum of the ranks in category i
    * \\(\\bar{r}_i\\), the average of the ranks in category i, using all original scores (incl. those not in the comparison).
    * \\(\\Phi\\left(\\dots\\right)\\), the cumulative distribution function of the standard normal distribution.
    
    References
    ----------
    Dunn, O. J. (1964). Multiple comparisons using rank sums. *Technometrics, 6*(3), 241–252. doi:10.1080/00401706.1964.10490181
    
    Steel, R. G. D. (1960). A rank sum test for comparing all pairs of treatments. *Technometrics, 2*(2), 197–207. doi:10.1080/00401706.1960.10489894
    
    Author
    ------
    Made by P. Stikker
    
    Companion website: https://PeterStatistics.com  
    YouTube channel: https://www.youtube.com/stikpet  
    Donations: https://www.patreon.com/bePatron?u=19398076
    
    '''
    
    #create the cross table    
    ct = tab_cross(ordField, catField, order1=levels, order2=categories, totals="include")
    
    #basic counts
    k = ct.shape[1]-1
    nlvl = ct.shape[0]-1
    n = ct.iloc[nlvl, k]
    
    #tie correction: sum of t^3 - t over the unique ranks
    t = 0
    for i in range(0, nlvl):
        tf = ct.iloc[i, k]
        t = t + tf**3 - tf
    
    a = n * (n + 1) / 12 - t / (12 * (n - 1))
    
    #the ranks of the levels
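    #midrank of each level: count of cases in lower levels + (level frequency + 1)/2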
    lvlRank = pd.Series(dtype="object")
    cf = 0
    for i in range(0,nlvl):
        lvlRank.at[i] = (2 * cf + ct.iloc[i, k] + 1) / 2
        cf = cf + ct.iloc[i, k]
    
    #sum of ranks per category
    srj = pd.Series(dtype="object")
    for j in range(0,k):
        sr = 0
        for i in range(0,nlvl):
             sr = sr + ct.iloc[i, j] * lvlRank.iloc[i]
        
        srj.at[j] = sr
    
    #number of pairwise comparisons, used for the Bonferroni adjustment
    ncomp = k * (k - 1) / 2
    res = pd.DataFrame()
    resRow = 0
    for i in range(0, k-1):
        for j in range(i+1, k):
            n1 = ct.iloc[nlvl, i]
            n2 = ct.iloc[nlvl, j]
            m1 = srj.iloc[i] / n1
            m2 = srj.iloc[j] / n2
            d = m1 - m2
            Var = a * (1 / n1 + 1 / n2)
            se = (Var)**0.5
            Z = d / se
            p = 2 * (1 - NormalDist().cdf(abs(Z)))
            
            res.at[resRow, 0] = ct.columns[i]
            res.at[resRow, 1] = ct.columns[j]
            res.at[resRow, 2] = n1
            res.at[resRow, 3] = n2
            res.at[resRow, 4] = m1
            res.at[resRow, 5] = m2
            res.at[resRow, 6] = Z
            res.at[resRow, 7] = p
            res.at[resRow, 8] = p * ncomp
            if res.iloc[resRow, 8] > 1:
                res.at[resRow, 8] = 1            
            
            resRow = resRow + 1
    
    colNames = ["cat. 1", "cat. 2", "n1", "n2", "mean rank 1", "mean rank 2", "statistic", "p-value", "adj. p-value"]
    res.columns = colNames
    return res

Functions

def ph_dunn(catField, ordField, categories=None, levels=None)

Post-Hoc Dunn Test

This can be used as a post-hoc test for a Kruskal-Wallis test (see ts_kruskal_wallis()).

The test compares the mean ranks of each possible pair of categories from the catField. The null hypothesis is that these mean ranks are equal. A simple Bonferroni adjustment is made for the multiple testing.

Dunn (1964) describes two procedures. The first is his own, the second is one from Steel (1960) for comparison. The difference is that in Dunn's procedure the mean rank of each category is based on the scores of all categories, including those that are not being compared, while Steel's procedure re-calculates the mean rank for each category using only the scores from the two categories being compared. This latter approach makes it very similar to a pairwise Mann-Whitney U test (see ph_mann_whitney()).

Other post-hoc tests that could be considered are Nemenyi, Steel-Dwass, Conover, a pairwise Mann-Whitney U, or pairwise Mood-Median.
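
A minimal usage sketch (the data below is made up for illustration; ph_dunn itself is imported from this module):

import pandas as pd
from stikpetP.other.poho_dunn import ph_dunn

#hypothetical example: three groups with ordinal scores
df = pd.DataFrame({
    "group": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
    "score": [1, 2, 2, 3, 3, 4, 4, 5, 5]})

#one row per pair of categories, with z, p-value, and adjusted p-value
print(ph_dunn(df["group"], df["score"]))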

Parameters

catField : pandas series
data with categories
ordField : pandas series
data with the scores
categories : list or dictionary, optional
the categories to use from catField
levels : list or dictionary, optional
the levels or order used in ordField.
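
Both optional arguments also accept a list; a hedged sketch (the "rating" column and its labels are hypothetical, the dictionary form is not shown):

#only compare categories "a" and "b"; the scores are ordered labels,
#so their order is passed explicitly via levels
res = ph_dunn(df["group"], df["rating"],
              categories=["a", "b"],
              levels=["low", "medium", "high"])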

Returns

A dataframe with:
 
  • cat. 1, one of the two categories being compared
  • cat. 2, second of the two categories being compared
  • n1, number of cat. 1 cases in comparison
  • n2, number of cat. 2 cases in comparison
  • mean rank 1, mean rank of cases in cat. 1, based on all cases (incl. categories not in comparison)
  • mean rank 2, mean rank of cases in cat. 2, based on all cases (incl. categories not in comparison)
  • statistic, the z-value of the test
  • p-value, the p-value (significance)
  • adj. p-value, the Bonferroni adjusted p-value
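
Since the result is a regular pandas dataframe, standard selection applies; for example, keeping only the pairs that remain significant after the Bonferroni adjustment:

#pairs still significant at the 0.05 level after adjustment
sig_pairs = res[res["adj. p-value"] < 0.05]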

Notes

The formula used (Dunn, 1964, p. 249):

$$z_{1,2} = \frac{\bar{r}_1 - \bar{r}_2}{\sqrt{\sigma_m^2}}$$
$$sig. = 2\times\left(1 - \Phi\left(\left|z\right|\right)\right)$$

With:

$$\sigma_m^2 = \left(\frac{n\times\left(n+1\right)}{12} - \frac{T}{12\times\left(n - 1\right)}\right)\times\left(\frac{1}{n_1} + \frac{1}{n_2}\right)$$
$$T = \sum_{j} \left(t_j^3 - t_j\right)$$
$$\bar{r}_i = \frac{R_i}{n_i}$$
$$R_i = \sum_{j=1}^{n_i} r_{i,j}$$

Symbols used

  • \(k\), the number of categories
  • \(t_j\), the frequency of the j-th unique rank
  • \(n_i\), the number of scores in category i
  • \(r_{i,j}\), the rank of the j-th score in category i, using all original scores (incl. those not in the comparison)
  • \(R_i\), the sum of the ranks in category i
  • \(\bar{r}_i\), the average of the ranks in category i, using all original scores (incl. those not in the comparison)
  • \(\Phi\left(\dots\right)\), the cumulative distribution function of the standard normal distribution
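
To make these formulas concrete, a stand-alone sketch (an independent re-implementation for a single pair, not the package's own code) that follows the definitions above, using scipy.stats.rankdata for the shared midranks:

import numpy as np
from scipy.stats import rankdata
from statistics import NormalDist

def dunn_z(scores, groups, g1, g2):
    scores, groups = np.asarray(scores), np.asarray(groups)
    #r_{i,j}: ranks over all scores together, midranks for ties
    r = rankdata(scores)
    n = len(scores)
    #T: sum of t^3 - t over the unique ranks
    _, t = np.unique(r, return_counts=True)
    T = np.sum(t**3 - t)
    #mean rank and size of each of the two categories
    m1, n1 = r[groups == g1].mean(), (groups == g1).sum()
    m2, n2 = r[groups == g2].mean(), (groups == g2).sum()
    #variance and z per Dunn (1964, p. 249)
    var = (n * (n + 1) / 12 - T / (12 * (n - 1))) * (1 / n1 + 1 / n2)
    z = (m1 - m2) / var**0.5
    return z, 2 * (1 - NormalDist().cdf(abs(z)))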

References

Dunn, O. J. (1964). Multiple comparisons using rank sums. Technometrics, 6(3), 241–252. doi:10.1080/00401706.1964.10490181

Steel, R. G. D. (1960). A rank sum test for comparing all pairs of treatments. Technometrics, 2(2), 197–207. doi:10.1080/00401706.1960.10489894

Author

Made by P. Stikker

Companion website: https://PeterStatistics.com
YouTube channel: https://www.youtube.com/stikpet
Donations: https://www.patreon.com/bePatron?u=19398076
