-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathplot_clustermaps.py
More file actions
493 lines (430 loc) · 19.8 KB
/
plot_clustermaps.py
File metadata and controls
493 lines (430 loc) · 19.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
#!/usr/bin/env python3
"""
plot_clustermaps.py
Build log-scaled abundance clustermaps (rows = taxa/ranks or ASVs, cols = samples)
with column color bars (type_group, status, optional kit), using palettes you provide.
Features
--------
- Robust CLI (no hard-coded paths).
- Ranks: Phylum/Class/Order/Family/Genus/Species/ASV_ID (configurable).
- Per-rank selection = union of:
* top N by total abundance within each type_group (configurable per rank), and
* any ASVs significant in an ISA table (optional; threshold configurable).
- Two heatmaps per rank:
* `_code` : column order preserved (no column clustering).
* `_clustered`: columns clustered.
- Outputs: SVG + PDF figures, and the underlying pivot table (TSV).
- Optional mitochondrial ASV clustermaps.
Example
-------
python plot_clustermaps.py \
--asv-meta /path/metadata/ASV_meta.tsv \
--metadata /path/metadata/metadata_updated.tsv \
--isa /path/indicspecies/Type_status_ISA_results.tsv \
--outdir /path/diversity \
--type-order "Oral Rinse,BAL,Bronchial Brush" \
--exclude-types "Skin Brush,Scope Flush" \
--type-palette "Oral Rinse=#6A3D9A,BAL=#0072B2,Bronchial Brush=#009E73" \
--status-palette "Non-Cancer=#FFFFFF,Cancer=#A50026,methods=#D3D3D3" \
--topN "Phylum=30,Class=30,Order=30,Family=30,Genus=30,Species=30,ASV_ID=6000" \
--isa-min-stat 0.6 \
--tick-values "5,50,500,5000,50000" \
--vmax 50000
# Add mitochondrial run:
--mito-asv /path/mito/ASVs/ASV_final.mito.tsv \
--mito-outdir /path/mito/diversity
"""
from __future__ import annotations
import argparse
from pathlib import Path
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import Patch
# ----------------------- Matplotlib/Seaborn defaults ------------------------
# Global rendering defaults applied once at import time.
mpl.rcParams['pdf.fonttype'] = 42  # Keep text as text in PDF (TrueType, editable in Illustrator)
mpl.rcParams['svg.fonttype'] = 'none'  # Keep text as text in SVG
mpl.rcParams['savefig.dpi'] = 600  # High-resolution output for any rasterized elements
plt.rcParams.update({'font.size': 12})
# NOTE(review): assumes 'Source Sans Pro' is installed; matplotlib falls back
# to its default family otherwise — confirm on the target machine.
plt.rcParams['font.family'] = 'Source Sans Pro'
sns.set_theme()
sns.set_style("white")
# ------------------------------- Utilities ----------------------------------
def parse_kv_csv(s: str) -> dict:
    """
    Parse "A=#fff,B:#123,C=steelblue" or "Phylum=30,Class=30" into dict[str,str].

    Either '=' or ':' separates key from value ('=' wins when both appear);
    malformed entries emit a warning and are skipped.
    Returns {} if s is falsy/empty.
    """
    mapping: dict = {}
    if not s:
        return mapping
    for chunk in s.split(","):
        chunk = chunk.strip()
        if not chunk:
            continue
        sep = "=" if "=" in chunk else (":" if ":" in chunk else None)
        if sep is None:
            warnings.warn(f"Ignoring malformed mapping: {chunk!r}")
            continue
        key, value = chunk.split(sep, 1)
        mapping[key.strip()] = value.strip()
    return mapping
def ensure_cols(df: pd.DataFrame, required: list[str], where: str):
    """Raise ValueError naming any column in `required` that `df` lacks.

    `where` is a human-readable label for the table (used in the message).
    """
    absent = [name for name in required if name not in df.columns]
    if absent:
        raise ValueError(f"Missing columns in {where}: {absent}")
def log10_transform(df: pd.DataFrame) -> pd.DataFrame:
    """Element-wise log10(x + 1); the +1 keeps zero counts finite."""
    shifted = df + 1.0
    return np.log10(shifted)
def dynamic_height(n_rows: int, per_row: float = 0.4, min_h: float = 8.0, max_h: float = 6000.0) -> float:
    """Figure height in inches: per_row * n_rows (at least one row), clamped to [min_h, max_h]."""
    raw = per_row * max(n_rows, 1)
    return float(np.clip(raw, min_h, max_h))
def build_rank_universe(
    asv_meta: pd.DataFrame,
    ranks: list[str],
    type_order: list[str],
    exclude_types: set[str],
    sig_asvs: set[str],
    topN_map: dict[str, int],
    count_col: str,
) -> dict[str, list[str]]:
    """
    For each rank, compute the union across type_groups of:
      * the top-N most abundant labels within each group (N from topN_map,
        default 30), and
      * the taxa of any ISA-significant ASVs.

    Missing taxa (None/NaN) collapse into a single "Other" label.
    Returns rank -> sorted list of allowed labels.
    """
    retained = asv_meta.loc[~asv_meta["type_group"].isin(exclude_types)].copy()
    universe: dict[str, list[str]] = {}
    for rank in ranks:
        # Per-(group, taxon) abundance totals for this rank.
        totals = (retained.groupby(["type_group", rank], dropna=False)[count_col]
                  .sum()
                  .reset_index())
        labels: set = set()
        for group in type_order:
            within = totals[totals["type_group"] == group]
            if within.empty:
                continue
            n_top = int(topN_map.get(rank, 30))
            labels.update(within.sort_values(count_col, ascending=False)[rank].head(n_top))
        # Fold in the taxa of ISA-significant ASVs, when available.
        if sig_asvs and "ASV_ID" in retained.columns:
            labels.update(retained.loc[retained["ASV_ID"].isin(sig_asvs), rank].unique())
        # Collapse missing taxa into "Other" and emit a stable sorted list.
        universe[rank] = sorted(
            {"Other" if (x is None or (isinstance(x, float) and np.isnan(x))) else x
             for x in labels}
        )
    return universe
def assign_plot_labels(asv_meta: pd.DataFrame, rank: str, allowed: list[str]) -> pd.Series:
    """
    Map each value of `asv_meta[rank]` to itself when it is in `allowed`,
    otherwise to the catch-all label "Other" (missing values included).

    Returns a Series aligned with `asv_meta`'s index.
    """
    # Fix: the original computed an unused local (`col = f"{rank}_plot"`) and
    # used a per-row `apply`. `isin` + `where` is vectorized and treats
    # NaN/None as not-a-member, matching the old `x in set` behavior.
    return asv_meta[rank].where(asv_meta[rank].isin(set(allowed)), "Other")
def col_colors_from_meta(
samples: list[str],
sample_meta: pd.DataFrame,
type_palette: dict[str, str],
status_palette: dict[str, str],
kit_palette: dict[str, str] | None = None,
) -> pd.DataFrame:
"""
Return a DataFrame with index = samples (columns of heatmap),
columns = color bars (type_group, status, optionally kit), containing hex colors.
"""
sub = sample_meta.loc[samples, ["type_group", "status"]].copy()
sub["type_group"] = sub["type_group"].map(lambda k: type_palette.get(str(k), "#D3D3D3"))
sub["status"] = sub["status"].map(lambda k: status_palette.get(str(k), "#D3D3D3"))
out = pd.DataFrame({"type_group": sub["type_group"], "status": sub["status"]})
if kit_palette is not None and "kit" in sample_meta.columns:
out["kit"] = sample_meta.loc[samples, "kit"].map(lambda k: kit_palette.get(str(k), "#D3D3D3"))
return out
def draw_clustermap(
    pivot: pd.DataFrame,
    col_colors_df: pd.DataFrame,
    outfile_prefix: Path,
    tick_vals_orig: list[int],
    vmax_display: int,
    figsize_w: float,
    row_height: float,
    min_fig_h: float,
    max_fig_h: float,
    method: str = "ward",
    metric: str = "euclidean",
    dendrogram_ratio: tuple[float, float] = (.05, .2),
    colors_ratio: float = 0.02,
    cbar_pos: tuple[float, float, float, float] = (1.02, 0.2, 0.03, 0.4),
    alpha: float = 0.75,
):
    """
    Render a pivot table (rows = taxa, cols = samples) as two greyscale
    log10-scaled clustermaps, each saved as SVG + PDF:
      - *_code      : column order preserved (col_cluster=False)
      - *_clustered : default clustermap with column clustering (col_cluster=True)

    Parameters
    ----------
    pivot : abundance matrix on the ORIGINAL (linear) scale; log10(x+1) is
        applied internally before plotting.
    col_colors_df : per-sample color bars, indexed by the same sample ids as
        pivot's columns (reindexed here to match column order).
    outfile_prefix : path WITHOUT extension for the first plot; the second
        plot's name is derived from it (see NOTE below).
    tick_vals_orig : colorbar tick positions on the original scale.
    vmax_display : colorbar maximum on the original scale.
    figsize_w, row_height, min_fig_h, max_fig_h : figure geometry; height is
        computed from the row count via dynamic_height().
    method, metric : linkage method/metric passed through to sns.clustermap.

    Side effects: writes four files (two SVG, two PDF) and closes the figures.
    """
    # Order columns for colors: align color-bar rows to the pivot's column order.
    col_colors_df = col_colors_df.loc[pivot.columns]
    # Log10 transform (plotting scale)
    pivot_log = log10_transform(pivot)
    # Greyscale cmap: white -> light grey -> black
    cmap = LinearSegmentedColormap.from_list("light_greyscale", ['#ffffff', '#d9d9d9', '#000000'], N=256)
    # Colorbar ticks: convert original-scale values to the log10(x+1) axis.
    tick_vals_log = [np.log10(v + 1) for v in tick_vals_orig]
    vmax_log = np.log10(vmax_display + 1)
    n_rows = pivot.shape[0]
    height = dynamic_height(n_rows, per_row=row_height, min_h=min_fig_h, max_h=max_fig_h)
    # 1) No column clustering (code): sample order is preserved as given.
    g = sns.clustermap(
        pivot_log,
        method=method,
        metric=metric,
        col_colors=col_colors_df,
        cmap=cmap,
        vmin=0, vmax=vmax_log,
        linewidths=0.5,
        xticklabels=True,
        yticklabels=True,
        dendrogram_ratio=dendrogram_ratio,
        colors_ratio=colors_ratio,
        figsize=(figsize_w, height),
        cbar_pos=cbar_pos,
        alpha=alpha,
        col_cluster=False
    )
    # Colorbar: ticks live on the log axis but are labeled with original counts.
    cbar = g.ax_heatmap.collections[0].colorbar
    cbar.set_ticks(tick_vals_log)
    cbar.set_ticklabels([f"{v:,}" for v in tick_vals_orig])
    cbar.set_label("ASV Count", rotation=270, labelpad=15)
    # Force x tick labels visible and match columns.
    g.ax_heatmap.set_xticks(g.ax_heatmap.get_xticks())
    g.ax_heatmap.set_xticklabels(pivot_log.columns, rotation=90, ha='center')
    g.ax_heatmap.tick_params(axis='x', bottom=True, labelbottom=True, length=5)
    out_svg = outfile_prefix.with_suffix(".svg")
    out_pdf = outfile_prefix.with_suffix(".pdf")
    plt.savefig(out_svg, bbox_inches='tight')
    plt.savefig(out_pdf, bbox_inches='tight')
    plt.close()
    # 2) With column clustering: samples reordered by the column dendrogram.
    g = sns.clustermap(
        pivot_log,
        method=method,
        metric=metric,
        col_colors=col_colors_df,
        cmap=cmap,
        vmin=0, vmax=vmax_log,
        linewidths=0.5,
        xticklabels=True,
        yticklabels=True,
        dendrogram_ratio=dendrogram_ratio,
        colors_ratio=colors_ratio,
        figsize=(figsize_w, height),
        cbar_pos=cbar_pos,
        alpha=alpha,
        col_cluster=True
    )
    cbar = g.ax_heatmap.collections[0].colorbar
    cbar.set_ticks(tick_vals_log)
    cbar.set_ticklabels([f"{v:,}" for v in tick_vals_orig])
    cbar.set_label("ASV Count", rotation=270, labelpad=15)
    g.ax_heatmap.tick_params(axis='x', bottom=True, labelbottom=True, length=5)
    # NOTE(review): the clustered filename is derived by replacing "_code" in
    # the stem; if outfile_prefix does not contain "_code", both variants
    # would be written to the SAME path and the first plot overwritten.
    out2_svg = outfile_prefix.with_name(outfile_prefix.stem.replace("_code", "_clustered")).with_suffix(".svg")
    out2_pdf = out2_svg.with_suffix(".pdf")
    plt.savefig(out2_svg, bbox_inches='tight')
    plt.savefig(out2_pdf, bbox_inches='tight')
    plt.close()
def read_isa_sig_asvs(isa_path: Path, min_stat: float) -> set[str]:
    """
    Read the combined ISA results and return ASV_IDs considered 'significant'
    for inclusion:
        (type_significance == True OR status_significance == True)
        AND (type_stat >= min_stat OR status_stat >= min_stat)

    Returns an empty set when no path is given, or (with a warning) when any
    required column is absent.
    """
    if not isa_path:
        return set()
    table = pd.read_csv(isa_path, sep="\t", header=0)
    # Tolerate a table whose first (possibly unnamed) column holds the ASV ids.
    if "ASV_ID" not in table.columns:
        table.rename(columns={table.columns[0]: "ASV_ID"}, inplace=True)
    # Guard missing columns: fall back to top-N-only selection.
    for name in ("type_significance", "status_significance", "type_stat", "status_stat"):
        if name not in table.columns:
            warnings.warn(f"ISA table missing column {name!r}; falling back to top-N selection only.")
            return set()
    significant = table["type_significance"].eq(True) | table["status_significance"].eq(True)
    strong = table["type_stat"].fillna(0).ge(min_stat) | table["status_stat"].fillna(0).ge(min_stat)
    return set(table.loc[significant & strong, "ASV_ID"].astype(str))
# ------------------------------- Main ---------------------------------------
def main():
    """CLI entry point: parse arguments, select taxa per rank, build abundance
    pivots (rows = taxa, cols = sample_code), and render/save clustermaps and
    their underlying TSV tables. Optionally repeats for a mitochondrial table.
    """
    ap = argparse.ArgumentParser(
        description="Clustermap pipeline (ranks/ASV + mitochondrial optional) with robust CLI."
    )
    # Core inputs
    ap.add_argument("--asv-meta", type=Path, required=True,
                    help="ASV_meta.tsv (must contain: ASV_ID, sample_code, sample, type_group, status, [kit], ranks, and count column).")
    ap.add_argument("--metadata", type=Path, required=True,
                    help="metadata_updated.tsv (must contain 'sample', 'sample_code', 'type_group', 'status', [kit]).")
    ap.add_argument("--isa", type=Path, default=None,
                    help="Optional Type_status_ISA_results.tsv to include significant ASVs into rank selection.")
    ap.add_argument("--outdir", type=Path, required=True,
                    help="Output directory for figures and pivot tables.")
    # Palettes & orders
    ap.add_argument("--type-order", type=str, default="Oral Rinse,BAL,Bronchial Brush",
                    help="Comma-separated ordering of type_group.")
    ap.add_argument("--exclude-types", type=str, default="Skin Brush,Scope Flush",
                    help="Comma-separated type_group values to exclude.")
    ap.add_argument("--type-palette", type=str, required=True,
                    help='e.g. "Oral Rinse=#6A3D9A,BAL=#0072B2,Bronchial Brush=#009E73"')
    ap.add_argument("--status-palette", type=str, required=True,
                    help='e.g. "Non-Cancer=#FFFFFF,Cancer=#A50026,methods=#D3D3D3"')
    ap.add_argument("--kit-palette", type=str, default="",
                    help='Optional: "HostZERO-DEP=#000000,HostZERO-NODEP=#808080,SPARK-ZYMO=#87CEEB"')
    # Ranks & selection
    ap.add_argument("--ranks", type=str, default="Phylum,Class,Order,Family,Genus,Species,ASV_ID",
                    help="Comma-separated ranks to plot.")
    ap.add_argument("--topN", type=str, default="Phylum=30,Class=30,Order=30,Family=30,Genus=30,Species=30,ASV_ID=6000",
                    help="Per-rank top-N selection, e.g. 'Phylum=30,...,ASV_ID=6000'.")
    ap.add_argument("--count-col", type=str, default="corr_count",
                    help="Abundance column in ASV_meta (default: corr_count).")
    ap.add_argument("--isa-min-stat", type=float, default=0.6,
                    help="Minimum stat to consider an ASV 'significant' in ISA gate (default: 0.6).")
    # Heatmap look
    ap.add_argument("--tick-values", type=str, default="5,50,500,5000,50000",
                    help="Comma-separated original scale ticks for colorbar.")
    ap.add_argument("--vmax", type=int, default=50000, help="Max display value for colorbar (on original scale).")
    ap.add_argument("--figwidth", type=float, default=32.0, help="Heatmap figure width (inches).")
    ap.add_argument("--row-height", type=float, default=0.4, help="Height per row (inches).")
    ap.add_argument("--min-height", type=float, default=8.0, help="Minimum figure height (inches).")
    ap.add_argument("--max-height", type=float, default=6000.0, help="Maximum figure height (inches).")
    # Mitochondrial (optional)
    ap.add_argument("--mito-asv", type=Path, default=None,
                    help="Optional ASV_final.mito.tsv to build mitochondrial clustermaps.")
    ap.add_argument("--mito-outdir", type=Path, default=None,
                    help="Optional output directory for mito figures/pivots (defaults to OUTDIR/'mito').")
    ap.add_argument("--mito-count-col", type=str, default="count",
                    help="Mito abundance field after stacking (default: count).")
    args = ap.parse_args()
    outdir = args.outdir
    outdir.mkdir(parents=True, exist_ok=True)
    # Palettes: "key=color" CSV strings -> dicts; kit palette is optional.
    type_palette = parse_kv_csv(args.type_palette)
    status_palette = parse_kv_csv(args.status_palette)
    kit_palette = parse_kv_csv(args.kit_palette) if args.kit_palette else None
    type_order = [x.strip() for x in args.type_order.split(",") if x.strip()]
    exclude_types = {x.strip() for x in args.exclude_types.split(",") if x.strip()}
    ranks = [x.strip() for x in args.ranks.split(",") if x.strip()]
    topN_map = {k: int(v) for k, v in parse_kv_csv(args.topN).items()}
    tick_vals_orig = [int(x.strip()) for x in args.tick_values.split(",") if x.strip()]
    vmax_display = int(args.vmax)
    # Read metadata (for column colors), keyed by 'sample'.
    meta = pd.read_csv(args.metadata, sep="\t", header=0)
    ensure_cols(meta, ["sample", "sample_code", "type_group", "status"], "metadata_updated.tsv")
    meta = meta.set_index("sample")
    # Read ASV meta (counts per ASV/sample with ranks)
    asv_meta = pd.read_csv(args.asv_meta, sep="\t", header=0)
    ensure_cols(asv_meta, ["ASV_ID", "sample_code", "sample", "type_group", "status", args.count_col], "ASV_meta.tsv")
    # ISA gate (optional): empty set means top-N selection only.
    sig_asvs = read_isa_sig_asvs(args.isa, args.isa_min_stat) if args.isa else set()
    # Build rank universes: rank -> allowed label list (top-N union ISA taxa).
    rank_universe = build_rank_universe(
        asv_meta=asv_meta,
        ranks=ranks,
        type_order=type_order,
        exclude_types=exclude_types,
        sig_asvs=sig_asvs,
        topN_map=topN_map,
        count_col=args.count_col,
    )
    # For each rank: add *_plot labels, pivot (rows=taxa_plot, cols=sample_code), plot
    working = asv_meta.copy()
    working = working[~working["type_group"].isin(exclude_types)]
    # sample meta (for col_colors) keyed by sample_code
    smeta = meta.reset_index().drop_duplicates("sample")[["sample", "sample_code", "type_group", "status"]]
    if kit_palette is not None and "kit" in asv_meta.columns:
        # 'kit' comes from ASV_meta, joined per sample.
        smeta = pd.merge(smeta, asv_meta[["sample", "kit"]].drop_duplicates("sample"), on="sample", how="left")
    smeta = smeta.set_index("sample_code")
    for rank in ranks:
        allowed = rank_universe.get(rank, [])
        colname = f"{rank}_plot"
        # Labels outside the rank universe collapse to "Other".
        working[colname] = assign_plot_labels(working, rank, allowed)
        # (rows = taxa_plot, cols = sample_code)
        pivot = (working.groupby(["sample_code", colname])[args.count_col]
                 .sum().reset_index()
                 .pivot(index=colname, columns="sample_code", values=args.count_col)
                 .fillna(0))
        # Column colors aligned to columns
        col_colors_df = col_colors_from_meta(
            samples=pivot.columns.tolist(),
            sample_meta=smeta,
            type_palette=type_palette,
            status_palette=status_palette,
            kit_palette=kit_palette,
        )
        # Legends (drawn on figure as standard legend would overlap); create patches for reference
        # NOTE(review): legend_patches is built but never attached to any figure.
        legend_patches = []
        for g in type_order:
            if g in type_palette:
                legend_patches.append(Patch(facecolor=type_palette[g], label=f"Type: {g}", alpha=0.75))
        for st, col in status_palette.items():
            legend_patches.append(Patch(facecolor=col, label=f"status: {st}", alpha=0.75))
        # (We rely on column color bars; patches are not explicitly added; feel free to adapt.)
        # Draw — the "_code" suffix is required by draw_clustermap's output naming.
        prefix = outdir / f"clustermap_{colname}_code"
        draw_clustermap(
            pivot=pivot,
            col_colors_df=col_colors_df,
            outfile_prefix=prefix,
            tick_vals_orig=tick_vals_orig,
            vmax_display=vmax_display,
            figsize_w=args.figwidth,
            row_height=args.row_height,
            min_fig_h=args.min_height,
            max_fig_h=args.max_height,
        )
        # Save pivot (underlying data for the heatmap)
        pivot_out = outdir / f"clustermap_{colname}.tsv"
        pivot.to_csv(pivot_out, sep="\t")
    # ------------------------- Mitochondrial (optional) -------------------------
    if args.mito_asv:
        mito_outdir = args.mito_outdir or (outdir / "mito")
        mito_outdir.mkdir(parents=True, exist_ok=True)
        # Wide table: rows = ASV_ID (index), columns = samples.
        mito_df = pd.read_csv(args.mito_asv, sep="\t", header=0, index_col=0)
        # columns look like samples; ensure names match metadata.sample (and we have sample_code)
        # NOTE(review): drops everything after the last '_' in each column name — confirm
        # this matches the sample naming used in metadata.
        mito_df.columns = [str(c).rsplit("_", 1)[0] for c in mito_df.columns]  # mimic original cleanup
        mito_stack = mito_df.stack().reset_index()
        mito_stack.columns = ["ASV_ID", "sample", args.mito_count_col]
        mito_stack = mito_stack[mito_stack[args.mito_count_col] > 0]
        # join metadata to get sample_code/type_group/status/kit
        mito_meta = pd.merge(mito_stack, meta.reset_index(), on="sample", how="left")
        mito_meta = mito_meta[~mito_meta["type_group"].isin(exclude_types)]
        # Build pivot (rows=ASV_ID, cols=sample_code)
        pivot = (mito_meta.groupby(["sample_code", "ASV_ID"])[args.mito_count_col]
                 .sum().reset_index()
                 .pivot(index="ASV_ID", columns="sample_code", values=args.mito_count_col)
                 .fillna(0))
        # Column color bars
        smeta_m = meta.reset_index().drop_duplicates("sample")[["sample", "sample_code", "type_group", "status"]]
        if kit_palette is not None and "kit" in mito_meta.columns:
            smeta_m = pd.merge(smeta_m, mito_meta[["sample", "kit"]].drop_duplicates("sample"), on="sample", how="left")
        smeta_m = smeta_m.set_index("sample_code")
        col_colors_df = col_colors_from_meta(
            samples=pivot.columns.tolist(),
            sample_meta=smeta_m,
            type_palette=type_palette,
            status_palette=status_palette,
            kit_palette=kit_palette,
        )
        prefix = mito_outdir / "clustermap_ASV_code_mito"
        draw_clustermap(
            pivot=pivot,
            col_colors_df=col_colors_df,
            outfile_prefix=prefix,
            tick_vals_orig=tick_vals_orig,
            vmax_display=vmax_display,
            figsize_w=args.figwidth,
            row_height=args.row_height,
            min_fig_h=args.min_height,
            max_fig_h=args.max_height,
        )
        pivot.to_csv(mito_outdir / "clustermap_ASV_mito.tsv", sep="\t")
    print(f"Done. Outputs in: {outdir}")
# Script entry point: only run the pipeline when executed directly.
if __name__ == "__main__":
    main()