+
+ Read the Docs
+ v: ${config.versions.current.slug}
+
+
+
+
+ ${renderLanguages(config)}
+ ${renderVersions(config)}
+ ${renderDownloads(config)}
+
+ On Read the Docs
+
+ Project Home
+
+
+ Builds
+
+
+ Downloads
+
+
+
+ Search
+
+
+
+
+
+
+ Hosted by Read the Docs
+
+
+
+ `;
+
+ // Inject the generated flyout into the body HTML element.
+ document.body.insertAdjacentHTML("beforeend", flyout);
+
+ // Trigger the Read the Docs Addons Search modal when clicking on the "Search docs" input from inside the flyout.
+ document
+ .querySelector("#flyout-search-form")
+ .addEventListener("focusin", () => {
+ const event = new CustomEvent("readthedocs-search-show");
+ document.dispatchEvent(event);
+ });
+ })
+}
+
+if (themeLanguageSelector || themeVersionSelector) {
+ function onSelectorSwitch(event) {
+ const option = event.target.selectedIndex;
+ const item = event.target.options[option];
+ window.location.href = item.dataset.url;
+ }
+
+ document.addEventListener("readthedocs-addons-data-ready", function (event) {
+ const config = event.detail.data();
+
+ const versionSwitch = document.querySelector(
+ "div.switch-menus > div.version-switch",
+ );
+ if (themeVersionSelector) {
+ let versions = config.versions.active;
+ if (config.versions.current.hidden || config.versions.current.type === "external") {
+ versions.unshift(config.versions.current);
+ }
+ const versionSelect = `
+
+ ${versions
+ .map(
+ (version) => `
+
+ ${version.slug}
+ `,
+ )
+ .join("\n")}
+
+ `;
+
+ versionSwitch.innerHTML = versionSelect;
+ versionSwitch.firstElementChild.addEventListener("change", onSelectorSwitch);
+ }
+
+ const languageSwitch = document.querySelector(
+ "div.switch-menus > div.language-switch",
+ );
+
+ if (themeLanguageSelector) {
+ if (config.projects.translations.length) {
+ // Add the current language to the options on the selector
+ let languages = config.projects.translations.concat(
+ config.projects.current,
+ );
+ languages = languages.sort((a, b) =>
+ a.language.name.localeCompare(b.language.name),
+ );
+
+ const languageSelect = `
+
+ ${languages
+ .map(
+ (language) => `
+
+ ${language.language.name}
+ `,
+ )
+ .join("\n")}
+
+ `;
+
+ languageSwitch.innerHTML = languageSelect;
+ languageSwitch.firstElementChild.addEventListener("change", onSelectorSwitch);
+ }
+ else {
+ languageSwitch.remove();
+ }
+ }
+ });
+}
+
+document.addEventListener("readthedocs-addons-data-ready", function (event) {
+ // Trigger the Read the Docs Addons Search modal when clicking on "Search docs" input from the topnav.
+ document
+ .querySelector("[role='search'] input")
+ .addEventListener("focusin", () => {
+ const event = new CustomEvent("readthedocs-search-show");
+ document.dispatchEvent(event);
+ });
+});
\ No newline at end of file
diff --git a/docs/_static/language_data.js b/docs/_static/language_data.js
new file mode 100644
index 0000000..c7fe6c6
--- /dev/null
+++ b/docs/_static/language_data.js
@@ -0,0 +1,192 @@
+/*
+ * This script contains the language-specific data used by searchtools.js,
+ * namely the list of stopwords, stemmer, scorer and splitter.
+ */
+
+var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
+
+
+/* Non-minified version is copied as a separate JS file, if available */
+
+/**
+ * Porter Stemmer
+ */
+var Stemmer = function() {
+
+ var step2list = {
+ ational: 'ate',
+ tional: 'tion',
+ enci: 'ence',
+ anci: 'ance',
+ izer: 'ize',
+ bli: 'ble',
+ alli: 'al',
+ entli: 'ent',
+ eli: 'e',
+ ousli: 'ous',
+ ization: 'ize',
+ ation: 'ate',
+ ator: 'ate',
+ alism: 'al',
+ iveness: 'ive',
+ fulness: 'ful',
+ ousness: 'ous',
+ aliti: 'al',
+ iviti: 'ive',
+ biliti: 'ble',
+ logi: 'log'
+ };
+
+ var step3list = {
+ icate: 'ic',
+ ative: '',
+ alize: 'al',
+ iciti: 'ic',
+ ical: 'ic',
+ ful: '',
+ ness: ''
+ };
+
+ var c = "[^aeiou]"; // consonant
+ var v = "[aeiouy]"; // vowel
+ var C = c + "[^aeiouy]*"; // consonant sequence
+ var V = v + "[aeiou]*"; // vowel sequence
+
+ var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
+ var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
+ var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
+ var s_v = "^(" + C + ")?" + v; // vowel in stem
+
+ this.stemWord = function (w) {
+ var stem;
+ var suffix;
+ var firstch;
+ var origword = w;
+
+ if (w.length < 3)
+ return w;
+
+ var re;
+ var re2;
+ var re3;
+ var re4;
+
+ firstch = w.substr(0,1);
+ if (firstch == "y")
+ w = firstch.toUpperCase() + w.substr(1);
+
+ // Step 1a
+ re = /^(.+?)(ss|i)es$/;
+ re2 = /^(.+?)([^s])s$/;
+
+ if (re.test(w))
+ w = w.replace(re,"$1$2");
+ else if (re2.test(w))
+ w = w.replace(re2,"$1$2");
+
+ // Step 1b
+ re = /^(.+?)eed$/;
+ re2 = /^(.+?)(ed|ing)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ re = new RegExp(mgr0);
+ if (re.test(fp[1])) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1];
+ re2 = new RegExp(s_v);
+ if (re2.test(stem)) {
+ w = stem;
+ re2 = /(at|bl|iz)$/;
+ re3 = new RegExp("([^aeiouylsz])\\1$");
+ re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re2.test(w))
+ w = w + "e";
+ else if (re3.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ else if (re4.test(w))
+ w = w + "e";
+ }
+ }
+
+ // Step 1c
+ re = /^(.+?)y$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(s_v);
+ if (re.test(stem))
+ w = stem + "i";
+ }
+
+ // Step 2
+ re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step2list[suffix];
+ }
+
+ // Step 3
+ re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step3list[suffix];
+ }
+
+ // Step 4
+ re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
+ re2 = /^(.+?)(s|t)(ion)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ if (re.test(stem))
+ w = stem;
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1] + fp[2];
+ re2 = new RegExp(mgr1);
+ if (re2.test(stem))
+ w = stem;
+ }
+
+ // Step 5
+ re = /^(.+?)e$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ re2 = new RegExp(meq1);
+ re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
+ w = stem;
+ }
+ re = /ll$/;
+ re2 = new RegExp(mgr1);
+ if (re.test(w) && re2.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+
+ // and turn initial Y back to y
+ if (firstch == "y")
+ w = firstch.toLowerCase() + w.substr(1);
+ return w;
+ }
+}
+
diff --git a/docs/_static/minus.png b/docs/_static/minus.png
new file mode 100644
index 0000000..d96755f
Binary files /dev/null and b/docs/_static/minus.png differ
diff --git a/docs/_static/plus.png b/docs/_static/plus.png
new file mode 100644
index 0000000..7107cec
Binary files /dev/null and b/docs/_static/plus.png differ
diff --git a/docs/_static/pygments.css b/docs/_static/pygments.css
new file mode 100644
index 0000000..6f8b210
--- /dev/null
+++ b/docs/_static/pygments.css
@@ -0,0 +1,75 @@
+pre { line-height: 125%; }
+td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+.highlight .hll { background-color: #ffffcc }
+.highlight { background: #f8f8f8; }
+.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */
+.highlight .err { border: 1px solid #F00 } /* Error */
+.highlight .k { color: #008000; font-weight: bold } /* Keyword */
+.highlight .o { color: #666 } /* Operator */
+.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
+.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
+.highlight .cp { color: #9C6500 } /* Comment.Preproc */
+.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
+.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
+.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
+.highlight .gd { color: #A00000 } /* Generic.Deleted */
+.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */
+.highlight .gr { color: #E40000 } /* Generic.Error */
+.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.highlight .gi { color: #008400 } /* Generic.Inserted */
+.highlight .go { color: #717171 } /* Generic.Output */
+.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
+.highlight .gs { font-weight: bold } /* Generic.Strong */
+.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.highlight .gt { color: #04D } /* Generic.Traceback */
+.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+.highlight .kp { color: #008000 } /* Keyword.Pseudo */
+.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+.highlight .kt { color: #B00040 } /* Keyword.Type */
+.highlight .m { color: #666 } /* Literal.Number */
+.highlight .s { color: #BA2121 } /* Literal.String */
+.highlight .na { color: #687822 } /* Name.Attribute */
+.highlight .nb { color: #008000 } /* Name.Builtin */
+.highlight .nc { color: #00F; font-weight: bold } /* Name.Class */
+.highlight .no { color: #800 } /* Name.Constant */
+.highlight .nd { color: #A2F } /* Name.Decorator */
+.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */
+.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
+.highlight .nf { color: #00F } /* Name.Function */
+.highlight .nl { color: #767600 } /* Name.Label */
+.highlight .nn { color: #00F; font-weight: bold } /* Name.Namespace */
+.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */
+.highlight .nv { color: #19177C } /* Name.Variable */
+.highlight .ow { color: #A2F; font-weight: bold } /* Operator.Word */
+.highlight .w { color: #BBB } /* Text.Whitespace */
+.highlight .mb { color: #666 } /* Literal.Number.Bin */
+.highlight .mf { color: #666 } /* Literal.Number.Float */
+.highlight .mh { color: #666 } /* Literal.Number.Hex */
+.highlight .mi { color: #666 } /* Literal.Number.Integer */
+.highlight .mo { color: #666 } /* Literal.Number.Oct */
+.highlight .sa { color: #BA2121 } /* Literal.String.Affix */
+.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */
+.highlight .sc { color: #BA2121 } /* Literal.String.Char */
+.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */
+.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
+.highlight .s2 { color: #BA2121 } /* Literal.String.Double */
+.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
+.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */
+.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
+.highlight .sx { color: #008000 } /* Literal.String.Other */
+.highlight .sr { color: #A45A77 } /* Literal.String.Regex */
+.highlight .s1 { color: #BA2121 } /* Literal.String.Single */
+.highlight .ss { color: #19177C } /* Literal.String.Symbol */
+.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */
+.highlight .fm { color: #00F } /* Name.Function.Magic */
+.highlight .vc { color: #19177C } /* Name.Variable.Class */
+.highlight .vg { color: #19177C } /* Name.Variable.Global */
+.highlight .vi { color: #19177C } /* Name.Variable.Instance */
+.highlight .vm { color: #19177C } /* Name.Variable.Magic */
+.highlight .il { color: #666 } /* Literal.Number.Integer.Long */
\ No newline at end of file
diff --git a/docs/_static/searchtools.js b/docs/_static/searchtools.js
new file mode 100644
index 0000000..2c774d1
--- /dev/null
+++ b/docs/_static/searchtools.js
@@ -0,0 +1,632 @@
+/*
+ * Sphinx JavaScript utilities for the full-text search.
+ */
+"use strict";
+
+/**
+ * Simple result scoring code.
+ */
+if (typeof Scorer === "undefined") {
+ var Scorer = {
+ // Implement the following function to further tweak the score for each result
+ // The function takes a result array [docname, title, anchor, descr, score, filename]
+ // and returns the new score.
+ /*
+ score: result => {
+ const [docname, title, anchor, descr, score, filename, kind] = result
+ return score
+ },
+ */
+
+ // query matches the full name of an object
+ objNameMatch: 11,
+ // or matches in the last dotted part of the object name
+ objPartialMatch: 6,
+ // Additive scores depending on the priority of the object
+ objPrio: {
+ 0: 15, // used to be importantResults
+ 1: 5, // used to be objectResults
+ 2: -5, // used to be unimportantResults
+ },
+ // Used when the priority is not in the mapping.
+ objPrioDefault: 0,
+
+ // query found in title
+ title: 15,
+ partialTitle: 7,
+ // query found in terms
+ term: 5,
+ partialTerm: 2,
+ };
+}
+
+// Global search result kind enum, used by themes to style search results.
+class SearchResultKind {
+ static get index() { return "index"; }
+ static get object() { return "object"; }
+ static get text() { return "text"; }
+ static get title() { return "title"; }
+}
+
+const _removeChildren = (element) => {
+ while (element && element.lastChild) element.removeChild(element.lastChild);
+};
+
+/**
+ * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
+ */
+const _escapeRegExp = (string) =>
+ string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
+
+const _displayItem = (item, searchTerms, highlightTerms) => {
+ const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
+ const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
+ const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
+ const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
+ const contentRoot = document.documentElement.dataset.content_root;
+
+ const [docName, title, anchor, descr, score, _filename, kind] = item;
+
+ let listItem = document.createElement("li");
+ // Add a class representing the item's type:
+ // can be used by a theme's CSS selector for styling
+ // See SearchResultKind for the class names.
+ listItem.classList.add(`kind-${kind}`);
+ let requestUrl;
+ let linkUrl;
+ if (docBuilder === "dirhtml") {
+ // dirhtml builder
+ let dirname = docName + "/";
+ if (dirname.match(/\/index\/$/))
+ dirname = dirname.substring(0, dirname.length - 6);
+ else if (dirname === "index/") dirname = "";
+ requestUrl = contentRoot + dirname;
+ linkUrl = requestUrl;
+ } else {
+ // normal html builders
+ requestUrl = contentRoot + docName + docFileSuffix;
+ linkUrl = docName + docLinkSuffix;
+ }
+ let linkEl = listItem.appendChild(document.createElement("a"));
+ linkEl.href = linkUrl + anchor;
+ linkEl.dataset.score = score;
+ linkEl.innerHTML = title;
+ if (descr) {
+ listItem.appendChild(document.createElement("span")).innerHTML =
+ " (" + descr + ")";
+ // highlight search terms in the description
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+ }
+ else if (showSearchSummary)
+ fetch(requestUrl)
+ .then((responseData) => responseData.text())
+ .then((data) => {
+ if (data)
+ listItem.appendChild(
+ Search.makeSearchSummary(data, searchTerms, anchor)
+ );
+ // highlight search terms in the summary
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+ });
+ Search.output.appendChild(listItem);
+};
+const _finishSearch = (resultCount) => {
+ Search.stopPulse();
+ Search.title.innerText = _("Search Results");
+ if (!resultCount)
+ Search.status.innerText = Documentation.gettext(
+ "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
+ );
+ else
+ Search.status.innerText = Documentation.ngettext(
+ "Search finished, found one page matching the search query.",
+ "Search finished, found ${resultCount} pages matching the search query.",
+ resultCount,
+ ).replace('${resultCount}', resultCount);
+};
+const _displayNextItem = (
+ results,
+ resultCount,
+ searchTerms,
+ highlightTerms,
+) => {
+ // results left, load the summary and display it
+ // this is intended to be dynamic (don't sub resultsCount)
+ if (results.length) {
+ _displayItem(results.pop(), searchTerms, highlightTerms);
+ setTimeout(
+ () => _displayNextItem(results, resultCount, searchTerms, highlightTerms),
+ 5
+ );
+ }
+ // search finished, update title and status message
+ else _finishSearch(resultCount);
+};
+// Helper function used by query() to order search results.
+// Each input is an array of [docname, title, anchor, descr, score, filename, kind].
+// Order the results by score (in opposite order of appearance, since the
+// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically.
+const _orderResultsByScoreThenName = (a, b) => {
+ const leftScore = a[4];
+ const rightScore = b[4];
+ if (leftScore === rightScore) {
+ // same score: sort alphabetically
+ const leftTitle = a[1].toLowerCase();
+ const rightTitle = b[1].toLowerCase();
+ if (leftTitle === rightTitle) return 0;
+ return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
+ }
+ return leftScore > rightScore ? 1 : -1;
+};
+
+/**
+ * Default splitQuery function. Can be overridden in ``sphinx.search`` with a
+ * custom function per language.
+ *
+ * The regular expression works by splitting the string on consecutive characters
+ * that are not Unicode letters, numbers, underscores, or emoji characters.
+ * This is the same as ``\W+`` in Python, preserving the surrogate pair area.
+ */
+if (typeof splitQuery === "undefined") {
+ var splitQuery = (query) => query
+ .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
+ .filter(term => term) // remove remaining empty strings
+}
+
+/**
+ * Search Module
+ */
+const Search = {
+ _index: null,
+ _queued_query: null,
+ _pulse_status: -1,
+
+ htmlToText: (htmlString, anchor) => {
+ const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
+ for (const removalQuery of [".headerlink", "script", "style"]) {
+ htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() });
+ }
+ if (anchor) {
+ const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`);
+ if (anchorContent) return anchorContent.textContent;
+
+ console.warn(
+ `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.`
+ );
+ }
+
+ // if anchor not specified or not found, fall back to main content
+ const docContent = htmlElement.querySelector('[role="main"]');
+ if (docContent) return docContent.textContent;
+
+ console.warn(
+ "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template."
+ );
+ return "";
+ },
+
+ init: () => {
+ const query = new URLSearchParams(window.location.search).get("q");
+ document
+ .querySelectorAll('input[name="q"]')
+ .forEach((el) => (el.value = query));
+ if (query) Search.performSearch(query);
+ },
+
+ loadIndex: (url) =>
+ (document.body.appendChild(document.createElement("script")).src = url),
+
+ setIndex: (index) => {
+ Search._index = index;
+ if (Search._queued_query !== null) {
+ const query = Search._queued_query;
+ Search._queued_query = null;
+ Search.query(query);
+ }
+ },
+
+ hasIndex: () => Search._index !== null,
+
+ deferQuery: (query) => (Search._queued_query = query),
+
+ stopPulse: () => (Search._pulse_status = -1),
+
+ startPulse: () => {
+ if (Search._pulse_status >= 0) return;
+
+ const pulse = () => {
+ Search._pulse_status = (Search._pulse_status + 1) % 4;
+ Search.dots.innerText = ".".repeat(Search._pulse_status);
+ if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
+ };
+ pulse();
+ },
+
+ /**
+ * perform a search for something (or wait until index is loaded)
+ */
+ performSearch: (query) => {
+ // create the required interface elements
+ const searchText = document.createElement("h2");
+ searchText.textContent = _("Searching");
+ const searchSummary = document.createElement("p");
+ searchSummary.classList.add("search-summary");
+ searchSummary.innerText = "";
+ const searchList = document.createElement("ul");
+ searchList.setAttribute("role", "list");
+ searchList.classList.add("search");
+
+ const out = document.getElementById("search-results");
+ Search.title = out.appendChild(searchText);
+ Search.dots = Search.title.appendChild(document.createElement("span"));
+ Search.status = out.appendChild(searchSummary);
+ Search.output = out.appendChild(searchList);
+
+ const searchProgress = document.getElementById("search-progress");
+ // Some themes don't use the search progress node
+ if (searchProgress) {
+ searchProgress.innerText = _("Preparing search...");
+ }
+ Search.startPulse();
+
+ // index already loaded, the browser was quick!
+ if (Search.hasIndex()) Search.query(query);
+ else Search.deferQuery(query);
+ },
+
+ _parseQuery: (query) => {
+ // stem the search terms and add them to the correct list
+ const stemmer = new Stemmer();
+ const searchTerms = new Set();
+ const excludedTerms = new Set();
+ const highlightTerms = new Set();
+ const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
+ splitQuery(query.trim()).forEach((queryTerm) => {
+ const queryTermLower = queryTerm.toLowerCase();
+
+ // maybe skip this "word"
+ // stopwords array is from language_data.js
+ if (
+ stopwords.indexOf(queryTermLower) !== -1 ||
+ queryTerm.match(/^\d+$/)
+ )
+ return;
+
+ // stem the word
+ let word = stemmer.stemWord(queryTermLower);
+ // select the correct list
+ if (word[0] === "-") excludedTerms.add(word.substr(1));
+ else {
+ searchTerms.add(word);
+ highlightTerms.add(queryTermLower);
+ }
+ });
+
+ if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
+ localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
+ }
+
+ // console.debug("SEARCH: searching for:");
+ // console.info("required: ", [...searchTerms]);
+ // console.info("excluded: ", [...excludedTerms]);
+
+ return [query, searchTerms, excludedTerms, highlightTerms, objectTerms];
+ },
+
+ /**
+ * execute search (requires search index to be loaded)
+ */
+ _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+ const allTitles = Search._index.alltitles;
+ const indexEntries = Search._index.indexentries;
+
+ // Collect multiple result groups to be sorted separately and then ordered.
+ // Each is an array of [docname, title, anchor, descr, score, filename, kind].
+ const normalResults = [];
+ const nonMainIndexResults = [];
+
+ _removeChildren(document.getElementById("search-progress"));
+
+ const queryLower = query.toLowerCase().trim();
+ for (const [title, foundTitles] of Object.entries(allTitles)) {
+ if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) {
+ for (const [file, id] of foundTitles) {
+ const score = Math.round(Scorer.title * queryLower.length / title.length);
+ const boost = titles[file] === title ? 1 : 0; // add a boost for document titles
+ normalResults.push([
+ docNames[file],
+ titles[file] !== title ? `${titles[file]} > ${title}` : title,
+ id !== null ? "#" + id : "",
+ null,
+ score + boost,
+ filenames[file],
+ SearchResultKind.title,
+ ]);
+ }
+ }
+ }
+
+ // search for explicit entries in index directives
+ for (const [entry, foundEntries] of Object.entries(indexEntries)) {
+ if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
+ for (const [file, id, isMain] of foundEntries) {
+ const score = Math.round(100 * queryLower.length / entry.length);
+ const result = [
+ docNames[file],
+ titles[file],
+ id ? "#" + id : "",
+ null,
+ score,
+ filenames[file],
+ SearchResultKind.index,
+ ];
+ if (isMain) {
+ normalResults.push(result);
+ } else {
+ nonMainIndexResults.push(result);
+ }
+ }
+ }
+ }
+
+ // lookup as object
+ objectTerms.forEach((term) =>
+ normalResults.push(...Search.performObjectSearch(term, objectTerms))
+ );
+
+ // lookup as search terms in fulltext
+ normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms));
+
+ // let the scorer override scores with a custom scoring function
+ if (Scorer.score) {
+ normalResults.forEach((item) => (item[4] = Scorer.score(item)));
+ nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item)));
+ }
+
+ // Sort each group of results by score and then alphabetically by name.
+ normalResults.sort(_orderResultsByScoreThenName);
+ nonMainIndexResults.sort(_orderResultsByScoreThenName);
+
+ // Combine the result groups in (reverse) order.
+ // Non-main index entries are typically arbitrary cross-references,
+ // so display them after other results.
+ let results = [...nonMainIndexResults, ...normalResults];
+
+ // remove duplicate search results
+ // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
+ let seen = new Set();
+ results = results.reverse().reduce((acc, result) => {
+ let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
+ if (!seen.has(resultStr)) {
+ acc.push(result);
+ seen.add(resultStr);
+ }
+ return acc;
+ }, []);
+
+ return results.reverse();
+ },
+
+ query: (query) => {
+ const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query);
+ const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms);
+
+ // for debugging
+ //Search.lastresults = results.slice(); // a copy
+ // console.info("search results:", Search.lastresults);
+
+ // print the results
+ _displayNextItem(results, results.length, searchTerms, highlightTerms);
+ },
+
+ /**
+ * search for object names
+ */
+ performObjectSearch: (object, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const objects = Search._index.objects;
+ const objNames = Search._index.objnames;
+ const titles = Search._index.titles;
+
+ const results = [];
+
+ const objectSearchCallback = (prefix, match) => {
+ const name = match[4]
+ const fullname = (prefix ? prefix + "." : "") + name;
+ const fullnameLower = fullname.toLowerCase();
+ if (fullnameLower.indexOf(object) < 0) return;
+
+ let score = 0;
+ const parts = fullnameLower.split(".");
+
+ // check for different match types: exact matches of full name or
+ // "last name" (i.e. last dotted part)
+ if (fullnameLower === object || parts.slice(-1)[0] === object)
+ score += Scorer.objNameMatch;
+ else if (parts.slice(-1)[0].indexOf(object) > -1)
+ score += Scorer.objPartialMatch; // matches in last name
+
+ const objName = objNames[match[1]][2];
+ const title = titles[match[0]];
+
+ // If more than one term searched for, we require other words to be
+ // found in the name/title/description
+ const otherTerms = new Set(objectTerms);
+ otherTerms.delete(object);
+ if (otherTerms.size > 0) {
+ const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
+ if (
+ [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
+ )
+ return;
+ }
+
+ let anchor = match[3];
+ if (anchor === "") anchor = fullname;
+ else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
+
+ const descr = objName + _(", in ") + title;
+
+ // add custom score for some objects according to scorer
+ if (Scorer.objPrio.hasOwnProperty(match[2]))
+ score += Scorer.objPrio[match[2]];
+ else score += Scorer.objPrioDefault;
+
+ results.push([
+ docNames[match[0]],
+ fullname,
+ "#" + anchor,
+ descr,
+ score,
+ filenames[match[0]],
+ SearchResultKind.object,
+ ]);
+ };
+ Object.keys(objects).forEach((prefix) =>
+ objects[prefix].forEach((array) =>
+ objectSearchCallback(prefix, array)
+ )
+ );
+ return results;
+ },
+
+ /**
+ * search for full-text terms in the index
+ */
+ performTermsSearch: (searchTerms, excludedTerms) => {
+ // prepare search
+ const terms = Search._index.terms;
+ const titleTerms = Search._index.titleterms;
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+
+ const scoreMap = new Map();
+ const fileMap = new Map();
+
+ // perform the search on the required terms
+ searchTerms.forEach((word) => {
+ const files = [];
+ const arr = [
+ { files: terms[word], score: Scorer.term },
+ { files: titleTerms[word], score: Scorer.title },
+ ];
+ // add support for partial matches
+ if (word.length > 2) {
+ const escapedWord = _escapeRegExp(word);
+ if (!terms.hasOwnProperty(word)) {
+ Object.keys(terms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: terms[term], score: Scorer.partialTerm });
+ });
+ }
+ if (!titleTerms.hasOwnProperty(word)) {
+ Object.keys(titleTerms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
+ });
+ }
+ }
+
+ // no match but word was a required one
+ if (arr.every((record) => record.files === undefined)) return;
+
+ // found search word in contents
+ arr.forEach((record) => {
+ if (record.files === undefined) return;
+
+ let recordFiles = record.files;
+ if (recordFiles.length === undefined) recordFiles = [recordFiles];
+ files.push(...recordFiles);
+
+ // set score for the word in each file
+ recordFiles.forEach((file) => {
+ if (!scoreMap.has(file)) scoreMap.set(file, {});
+ scoreMap.get(file)[word] = record.score;
+ });
+ });
+
+ // create the mapping
+ files.forEach((file) => {
+ if (!fileMap.has(file)) fileMap.set(file, [word]);
+ else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word);
+ });
+ });
+
+ // now check if the files don't contain excluded terms
+ const results = [];
+ for (const [file, wordList] of fileMap) {
+ // check if all requirements are matched
+
+ // as search terms with length < 3 are discarded
+ const filteredTermCount = [...searchTerms].filter(
+ (term) => term.length > 2
+ ).length;
+ if (
+ wordList.length !== searchTerms.size &&
+ wordList.length !== filteredTermCount
+ )
+ continue;
+
+ // ensure that none of the excluded terms is in the search result
+ if (
+ [...excludedTerms].some(
+ (term) =>
+ terms[term] === file ||
+ titleTerms[term] === file ||
+ (terms[term] || []).includes(file) ||
+ (titleTerms[term] || []).includes(file)
+ )
+ )
+ break;
+
+ // select one (max) score for the file.
+ const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
+ // add result to the result list
+ results.push([
+ docNames[file],
+ titles[file],
+ "",
+ null,
+ score,
+ filenames[file],
+ SearchResultKind.text,
+ ]);
+ }
+ return results;
+ },
+
+ /**
+ * helper function to return a node containing the
+ * search summary for a given text. keywords is a list
+ * of stemmed words.
+ */
+ makeSearchSummary: (htmlText, keywords, anchor) => {
+ const text = Search.htmlToText(htmlText, anchor);
+ if (text === "") return null;
+
+ const textLower = text.toLowerCase();
+ const actualStartPosition = [...keywords]
+ .map((k) => textLower.indexOf(k.toLowerCase()))
+ .filter((i) => i > -1)
+ .slice(-1)[0];
+ const startWithContext = Math.max(actualStartPosition - 120, 0);
+
+ const top = startWithContext === 0 ? "" : "...";
+ const tail = startWithContext + 240 < text.length ? "..." : "";
+
+ let summary = document.createElement("p");
+ summary.classList.add("context");
+ summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
+
+ return summary;
+ },
+};
+
+_ready(Search.init);
diff --git a/docs/_static/sphinx_highlight.js b/docs/_static/sphinx_highlight.js
new file mode 100644
index 0000000..8a96c69
--- /dev/null
+++ b/docs/_static/sphinx_highlight.js
@@ -0,0 +1,154 @@
+/* Highlighting utilities for Sphinx HTML documentation. */
+"use strict";
+
+const SPHINX_HIGHLIGHT_ENABLED = true
+
+/**
+ * highlight a given string on a node by wrapping it in
+ * span elements with the given class name.
+ */
+const _highlight = (node, addItems, text, className) => {
+ if (node.nodeType === Node.TEXT_NODE) {
+ const val = node.nodeValue;
+ const parent = node.parentNode;
+ const pos = val.toLowerCase().indexOf(text);
+ if (
+ pos >= 0 &&
+ !parent.classList.contains(className) &&
+ !parent.classList.contains("nohighlight")
+ ) {
+ let span;
+
+ const closestNode = parent.closest("body, svg, foreignObject");
+ const isInSVG = closestNode && closestNode.matches("svg");
+ if (isInSVG) {
+ span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
+ } else {
+ span = document.createElement("span");
+ span.classList.add(className);
+ }
+
+ span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+ const rest = document.createTextNode(val.substr(pos + text.length));
+ parent.insertBefore(
+ span,
+ parent.insertBefore(
+ rest,
+ node.nextSibling
+ )
+ );
+ node.nodeValue = val.substr(0, pos);
+ /* There may be more occurrences of search term in this node. So call this
+ * function recursively on the remaining fragment.
+ */
+ _highlight(rest, addItems, text, className);
+
+ if (isInSVG) {
+ const rect = document.createElementNS(
+ "http://www.w3.org/2000/svg",
+ "rect"
+ );
+ const bbox = parent.getBBox();
+ rect.x.baseVal.value = bbox.x;
+ rect.y.baseVal.value = bbox.y;
+ rect.width.baseVal.value = bbox.width;
+ rect.height.baseVal.value = bbox.height;
+ rect.setAttribute("class", className);
+ addItems.push({ parent: parent, target: rect });
+ }
+ }
+ } else if (node.matches && !node.matches("button, select, textarea")) {
+ node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
+ }
+};
+const _highlightText = (thisNode, text, className) => {
+ let addItems = [];
+ _highlight(thisNode, addItems, text, className);
+ addItems.forEach((obj) =>
+ obj.parent.insertAdjacentElement("beforebegin", obj.target)
+ );
+};
+
+/**
+ * Small JavaScript module for the documentation.
+ */
+const SphinxHighlight = {
+
+ /**
+ * highlight the search words provided in localstorage in the text
+ */
+ highlightSearchWords: () => {
+ if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
+
+ // get and clear terms from localstorage
+ const url = new URL(window.location);
+ const highlight =
+ localStorage.getItem("sphinx_highlight_terms")
+ || url.searchParams.get("highlight")
+ || "";
+ localStorage.removeItem("sphinx_highlight_terms")
+ url.searchParams.delete("highlight");
+ window.history.replaceState({}, "", url);
+
+ // get individual terms from highlight string
+ const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
+ if (terms.length === 0) return; // nothing to do
+
+ // There should never be more than one element matching "div.body"
+ const divBody = document.querySelectorAll("div.body");
+ const body = divBody.length ? divBody[0] : document.querySelector("body");
+ window.setTimeout(() => {
+ terms.forEach((term) => _highlightText(body, term, "highlighted"));
+ }, 10);
+
+ const searchBox = document.getElementById("searchbox");
+ if (searchBox === null) return;
+ searchBox.appendChild(
+ document
+ .createRange()
+ .createContextualFragment(
+ '
' +
+ '' +
+ _("Hide Search Matches") +
+ "
"
+ )
+ );
+ },
+
+ /**
+ * helper function to hide the search marks again
+ */
+ hideSearchWords: () => {
+ document
+ .querySelectorAll("#searchbox .highlight-link")
+ .forEach((el) => el.remove());
+ document
+ .querySelectorAll("span.highlighted")
+ .forEach((el) => el.classList.remove("highlighted"));
+ localStorage.removeItem("sphinx_highlight_terms")
+ },
+
+ initEscapeListener: () => {
+ // only install a listener if it is really needed
+ if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
+
+ document.addEventListener("keydown", (event) => {
+ // bail for input elements
+ if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+ // bail with special keys
+ if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
+ if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
+ SphinxHighlight.hideSearchWords();
+ event.preventDefault();
+ }
+ });
+ },
+};
+
+_ready(() => {
+ /* Do not call highlightSearchWords() when we are on the search page.
+ * It will highlight words from the *previous* search query.
+ */
+ if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords();
+ SphinxHighlight.initEscapeListener();
+});
diff --git a/docs/genindex.html b/docs/genindex.html
new file mode 100644
index 0000000..7fe594b
--- /dev/null
+++ b/docs/genindex.html
@@ -0,0 +1,406 @@
+
+
+
+
+
+
+
+
Index — sample-annotator 0.0.1 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ sample-annotator
+
+
+
+
+
+
+
+
+
+
Index
+
+
+
A
+ |
B
+ |
C
+ |
D
+ |
E
+ |
F
+ |
G
+ |
I
+ |
L
+ |
M
+ |
O
+ |
P
+ |
R
+ |
S
+ |
T
+ |
U
+ |
V
+ |
W
+
+
+
A
+
+
+
B
+
+
+
C
+
+
+
D
+
+
+
E
+
+
+
F
+
+
+
G
+
+
+
I
+
+
+
L
+
+
+
M
+
+
+
O
+
+
+
P
+
+
+
R
+
+
+
S
+
+
+
+ sample_annotator
+
+
+
+ sample_annotator.report_model
+
+
+
+ sample_annotator.sample_annotator
+
+
+
+
+
+
+
T
+
+
+
U
+
+
+
V
+
+
+
W
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/index.html b/docs/index.html
new file mode 100644
index 0000000..8183b48
--- /dev/null
+++ b/docs/index.html
@@ -0,0 +1,136 @@
+
+
+
+
+
+
+
+
+
Welcome to sample-annotator’s documentation! — sample-annotator 0.0.1 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ sample-annotator
+
+
+
+
+
+
+
+ Welcome to sample-annotator’s documentation!
+
+ Edit on GitHub
+
+
+
+
+
+
+
+
+Welcome to sample-annotator’s documentation!
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/modules.html b/docs/modules.html
new file mode 100644
index 0000000..4a29c77
--- /dev/null
+++ b/docs/modules.html
@@ -0,0 +1,199 @@
+
+
+
+
+
+
+
+
+
sample-annotator — sample-annotator 0.0.1 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/objects.inv b/docs/objects.inv
new file mode 100644
index 0000000..3eacf6e
--- /dev/null
+++ b/docs/objects.inv
@@ -0,0 +1,7 @@
+# Sphinx inventory version 2
+# Project: sample-annotator
+# Version:
+# The remainder of this file is compressed using zlib.
+xڽXn0+
0753{<9|!l1lJ%i
#>Pis_L2yn-ʏ\J\Q
+7#U<:$]{&9B5o^c^%pj-N;OπD7Y2Uk.3cvcA_#.+$-M˹h
+]ӧ/#֧R&~v3)q|G~⋿0ڤ8_SRlA[=Zb;uBP%qy,>&
=!A],ԇG叩[[3/`FC~}U@훙l\XsfqbɃ#A4L;23L.Uf&ǵ|\),gp~T),:(ujzy3%rR5b~Qf҈hb05TG_?~RhfCzFD]Fa"}K)lϪIt'MƄY5Ӷ$W3n؇?`K$c'd=~u()-fV
\ No newline at end of file
diff --git a/docs/py-modindex.html b/docs/py-modindex.html
new file mode 100644
index 0000000..f9e19b8
--- /dev/null
+++ b/docs/py-modindex.html
@@ -0,0 +1,136 @@
+
+
+
+
+
+
+
+ Python Module Index — sample-annotator 0.0.1 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ sample-annotator
+
+
+
+
+
+
+
+ Python Module Index
+
+
+
+
+
+
+
+
+
+
Python Module Index
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/sample_annotator.html b/docs/sample_annotator.html
new file mode 100644
index 0000000..1a16493
--- /dev/null
+++ b/docs/sample_annotator.html
@@ -0,0 +1,461 @@
+
+
+
+
+
+
+
+
+ sample_annotator package — sample-annotator 0.0.1 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ sample-annotator
+
+
+
+
+
+
+
+
+
+sample_annotator package
+
+
+sample_annotator.report_model module
+
+
+class sample_annotator.report_model. AnnotationMultiSampleReport ( reports : List [ AnnotationReport ] | None = None )
+Bases: object
+Multi-report of a set of samples
+
+
+all_outputs ( ) → List [ Dict [ str , Any ] ]
+
+
+
+
+as_dataframe ( )
+
+
+
+
+reports : List [ AnnotationReport ] = None
+
+
+
+
+
+
+class sample_annotator.report_model. AnnotationReport ( messages : List [ Message ] | None = None , package : PackageCombo | None = None , input : Dict [ str , Any ] | None = None , output : Dict [ str , Any ] | None = None , sample_id : str | None = None )
+Bases: object
+Annotation report for a single sample
+
+
+add_message ( * args , ** kwargs )
+
+
+
+
+annotation_sufficiency_score = 0.0
+
+
+
+
+as_dataframe ( )
+
+
+
+
+input : Dict [ str , Any ] = None
+
+
+
+
+max_severity ( )
+
+
+
+
+messages : List [ Message ] = None
+
+
+
+
+messages_by_category ( ) → Dict
+
+
+
+
+output : Dict [ str , Any ] = None
+
+
+
+
+package : PackageCombo = None
+
+
+
+
+passes ( )
+
+
+
+
+sample_id : str = None
+
+
+
+
+
+
+class sample_annotator.report_model. Category ( value )
+Bases: Enum
+An enumeration.
+
+
+BadNull = 'bad-null'
+
+
+
+
+ControlledVocabulary = 'controlled-vocabulary'
+
+
+
+
+Core = 'core'
+
+
+
+
+Geo = 'geo'
+
+
+
+
+Identifier = 'identifier'
+
+
+
+
+Inapplicable = 'inapplicable'
+
+
+
+
+MeasurementSyntax = 'measurement-syntax'
+
+
+
+
+MissingCore = 'missing-core'
+
+
+
+
+Unclassified = 'unclassified'
+
+
+
+
+Units = 'units'
+
+
+
+
+UnknownField = 'unknown-field'
+
+
+
+
+static list ( )
+
+
+
+
+
+
+class sample_annotator.report_model. Message ( description : str | None = None , severity : int = 1 , was_repaired : bool | None = None , category : Category = Category.Unclassified , field : str | None = None )
+Bases: object
+Individual report message
+
+
+as_dict ( ) → Dict
+
+
+
+
+category : Category = 'unclassified'
+
+
+
+
+description : str = None
+
+
+
+
+field : str = None
+
+
+
+
+severity : int = 1
+
+
+
+
+was_repaired : bool = None
+
+
+
+
+
+
+class sample_annotator.report_model. PackageCombo ( environmental_package : str | None = None , checklist : str | None = None )
+Bases: object
+Tuple of environmental package and checklist
+
+
+checklist : str = None
+
+
+
+
+environmental_package : str = None
+
+
+
+
+
+
+sample_annotator.sample_annotator module
+
+
+class sample_annotator.sample_annotator. SampleAnnotator ( target_class : ClassDefinition | None = None , geoengine : GeoEngine = GeoEngine(googlemaps_api_key=None) , measurement_engine : MeasurementEngine = MeasurementEngine() , schema : SampleSchema = SampleSchema(object=None) )
+Bases: object
+TODO
+
+
+annotate ( sample : Dict [ str , Any ] , study : Dict [ str , Any ] | None = None ) → AnnotationReport
+Annotate a sample
+Returns an AnnotationReport object that includes a transformed sample representation,
+plus reports of all errors/warnings found, and repairs made
+Performs a sequential series of tidy activities. Each report
+
+
+
+
+annotate_all ( samples : List [ Dict [ str , Any ] ] , study : Dict [ str , Any ] | None = None ) → AnnotationMultiSampleReport
+Annotate a list of samples
+
+
+
+
+geoengine : GeoEngine = GeoEngine(googlemaps_api_key=None)
+
+
+
+
+infer_package ( sample : Dict [ str , Any ] , report : AnnotationReport )
+Infer the environment package / checklist combo, either
+from directly asserted fields, or other means
+
+
+
+
+measurement_engine : MeasurementEngine = MeasurementEngine()
+
+
+
+
+perform_geolocation_inference ( sample : Dict [ str , Any ] , report : AnnotationReport )
+Performs inference using geolocation information
+
+
+
+
+perform_inference ( sample : Dict [ str , Any ] , report : AnnotationReport )
+Performs Machine Learning inference
+
+
+
+
+perform_text_mining ( sample : Dict [ str , Any ] , report : AnnotationReport )
+Performs text mining
+
+
+
+
+schema : SampleSchema = SampleSchema(object=None)
+
+
+
+
+target_class : ClassDefinition = None
+
+
+
+
+tidy_enumerations ( sample : Dict [ str , Any ] , report : AnnotationReport )
+Tidies measurement fields
+
+
+
+
+tidy_keys ( sample : Dict [ str , Any ] , report : AnnotationReport )
+Performs tidying on all keys/fields/slots in the sample dictionary
+
+
+
+
+
+
+tidy_measurements ( sample : Dict [ str , Any ] , report : AnnotationReport )
+Tidies measurement fields
+
+
+
+
+tidy_nulls ( sample : Dict [ str , Any ] , report : AnnotationReport )
+Normalizes to EBI standard null values
+https://ena-docs.readthedocs.io/en/latest/submit/samples/missing-values.html
+
+
+
+
+validate_identifier ( sample : Dict [ str , Any ] , report : AnnotationReport )
+
+
+
+
+
+
+sample_annotator.sample_utils module
+
+
+sample_annotator.sample_utils. create_tests ( samples : List [ Dict [ str , Any ] ] )
+Takes normalized samples and uses this to create tests
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/search.html b/docs/search.html
new file mode 100644
index 0000000..4c23473
--- /dev/null
+++ b/docs/search.html
@@ -0,0 +1,121 @@
+
+
+
+
+
+
+
+ Search — sample-annotator 0.0.1 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ sample-annotator
+
+
+
+
+
+
+
+
+
+
+
+ Please activate JavaScript to enable the search functionality.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/searchindex.js b/docs/searchindex.js
new file mode 100644
index 0000000..a03631e
--- /dev/null
+++ b/docs/searchindex.js
@@ -0,0 +1 @@
+Search.setIndex({"alltitles": {"API Docs": [[3, "api-docs"]], "Command Line": [[3, "command-line"]], "Contents:": [[0, null]], "Indices and tables": [[0, "indices-and-tables"]], "Installing": [[3, "installing"]], "Module contents": [[2, "module-sample_annotator"]], "Modules": [[3, "modules"]], "NMDC Sample Annotator API": [[3, null]], "Schema Validation": [[3, "schema-validation"]], "Starting the web API": [[3, "starting-the-web-api"]], "Submodules": [[2, "submodules"]], "Testing": [[3, "testing"]], "Validation reports": [[3, "validation-reports"]], "Welcome to sample-annotator\u2019s documentation!": [[0, null]], "What is it?": [[3, "what-is-it"]], "pipenv": [[3, "pipenv"]], "sample-annotator": [[1, null]], "sample_annotator package": [[2, null]], "sample_annotator.report_model module": [[2, "module-sample_annotator.report_model"]], "sample_annotator.sample_annotator module": [[2, "module-sample_annotator.sample_annotator"]], "sample_annotator.sample_utils module": [[2, "module-sample_annotator.sample_utils"]], "venv": [[3, "venv"]]}, "docnames": ["index", "modules", "sample_annotator", "static/intro"], "envversion": {"sphinx": 64, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2}, "filenames": ["index.rst", "modules.rst", "sample_annotator.rst", "static/intro.md"], "indexentries": {"add_message() (sample_annotator.report_model.annotationreport method)": [[2, "sample_annotator.report_model.AnnotationReport.add_message", false]], "all_outputs() (sample_annotator.report_model.annotationmultisamplereport method)": [[2, "sample_annotator.report_model.AnnotationMultiSampleReport.all_outputs", false]], "annotate() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.annotate", false]], 
"annotate_all() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.annotate_all", false]], "annotation_sufficiency_score (sample_annotator.report_model.annotationreport attribute)": [[2, "sample_annotator.report_model.AnnotationReport.annotation_sufficiency_score", false]], "annotationmultisamplereport (class in sample_annotator.report_model)": [[2, "sample_annotator.report_model.AnnotationMultiSampleReport", false]], "annotationreport (class in sample_annotator.report_model)": [[2, "sample_annotator.report_model.AnnotationReport", false]], "as_dataframe() (sample_annotator.report_model.annotationmultisamplereport method)": [[2, "sample_annotator.report_model.AnnotationMultiSampleReport.as_dataframe", false]], "as_dataframe() (sample_annotator.report_model.annotationreport method)": [[2, "sample_annotator.report_model.AnnotationReport.as_dataframe", false]], "as_dict() (sample_annotator.report_model.message method)": [[2, "sample_annotator.report_model.Message.as_dict", false]], "badnull (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.BadNull", false]], "category (class in sample_annotator.report_model)": [[2, "sample_annotator.report_model.Category", false]], "category (sample_annotator.report_model.message attribute)": [[2, "sample_annotator.report_model.Message.category", false]], "checklist (sample_annotator.report_model.packagecombo attribute)": [[2, "sample_annotator.report_model.PackageCombo.checklist", false]], "controlledvocabulary (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.ControlledVocabulary", false]], "core (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.Core", false]], "create_tests() (in module sample_annotator.sample_utils)": [[2, "sample_annotator.sample_utils.create_tests", false]], "description 
(sample_annotator.report_model.message attribute)": [[2, "sample_annotator.report_model.Message.description", false]], "environmental_package (sample_annotator.report_model.packagecombo attribute)": [[2, "sample_annotator.report_model.PackageCombo.environmental_package", false]], "field (sample_annotator.report_model.message attribute)": [[2, "sample_annotator.report_model.Message.field", false]], "geo (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.Geo", false]], "geoengine (sample_annotator.sample_annotator.sampleannotator attribute)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.geoengine", false]], "identifier (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.Identifier", false]], "inapplicable (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.Inapplicable", false]], "infer_package() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.infer_package", false]], "input (sample_annotator.report_model.annotationreport attribute)": [[2, "sample_annotator.report_model.AnnotationReport.input", false]], "list() (sample_annotator.report_model.category static method)": [[2, "sample_annotator.report_model.Category.list", false]], "max_severity() (sample_annotator.report_model.annotationreport method)": [[2, "sample_annotator.report_model.AnnotationReport.max_severity", false]], "measurement_engine (sample_annotator.sample_annotator.sampleannotator attribute)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.measurement_engine", false]], "measurementsyntax (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.MeasurementSyntax", false]], "message (class in sample_annotator.report_model)": [[2, "sample_annotator.report_model.Message", false]], "messages (sample_annotator.report_model.annotationreport 
attribute)": [[2, "sample_annotator.report_model.AnnotationReport.messages", false]], "messages_by_category() (sample_annotator.report_model.annotationreport method)": [[2, "sample_annotator.report_model.AnnotationReport.messages_by_category", false]], "missingcore (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.MissingCore", false]], "module": [[2, "module-sample_annotator", false], [2, "module-sample_annotator.report_model", false], [2, "module-sample_annotator.sample_annotator", false], [2, "module-sample_annotator.sample_utils", false]], "output (sample_annotator.report_model.annotationreport attribute)": [[2, "sample_annotator.report_model.AnnotationReport.output", false]], "package (sample_annotator.report_model.annotationreport attribute)": [[2, "sample_annotator.report_model.AnnotationReport.package", false]], "packagecombo (class in sample_annotator.report_model)": [[2, "sample_annotator.report_model.PackageCombo", false]], "passes() (sample_annotator.report_model.annotationreport method)": [[2, "sample_annotator.report_model.AnnotationReport.passes", false]], "perform_geolocation_inference() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.perform_geolocation_inference", false]], "perform_inference() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.perform_inference", false]], "perform_text_mining() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.perform_text_mining", false]], "reports (sample_annotator.report_model.annotationmultisamplereport attribute)": [[2, "sample_annotator.report_model.AnnotationMultiSampleReport.reports", false]], "sample_annotator": [[2, "module-sample_annotator", false]], "sample_annotator.report_model": [[2, "module-sample_annotator.report_model", false]], 
"sample_annotator.sample_annotator": [[2, "module-sample_annotator.sample_annotator", false]], "sample_annotator.sample_utils": [[2, "module-sample_annotator.sample_utils", false]], "sample_id (sample_annotator.report_model.annotationreport attribute)": [[2, "sample_annotator.report_model.AnnotationReport.sample_id", false]], "sampleannotator (class in sample_annotator.sample_annotator)": [[2, "sample_annotator.sample_annotator.SampleAnnotator", false]], "schema (sample_annotator.sample_annotator.sampleannotator attribute)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.schema", false]], "severity (sample_annotator.report_model.message attribute)": [[2, "sample_annotator.report_model.Message.severity", false]], "target_class (sample_annotator.sample_annotator.sampleannotator attribute)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.target_class", false]], "tidy_enumerations() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.tidy_enumerations", false]], "tidy_keys() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.tidy_keys", false]], "tidy_measurements() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.tidy_measurements", false]], "tidy_nulls() (sample_annotator.sample_annotator.sampleannotator method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.tidy_nulls", false]], "unclassified (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.Unclassified", false]], "units (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.Units", false]], "unknownfield (sample_annotator.report_model.category attribute)": [[2, "sample_annotator.report_model.Category.UnknownField", false]], "validate_identifier() (sample_annotator.sample_annotator.sampleannotator 
method)": [[2, "sample_annotator.sample_annotator.SampleAnnotator.validate_identifier", false]], "was_repaired (sample_annotator.report_model.message attribute)": [[2, "sample_annotator.report_model.Message.was_repaired", false]]}, "objects": {"": [[2, 0, 0, "-", "sample_annotator"]], "sample_annotator": [[2, 0, 0, "-", "report_model"], [2, 0, 0, "-", "sample_annotator"], [2, 0, 0, "-", "sample_utils"]], "sample_annotator.report_model": [[2, 1, 1, "", "AnnotationMultiSampleReport"], [2, 1, 1, "", "AnnotationReport"], [2, 1, 1, "", "Category"], [2, 1, 1, "", "Message"], [2, 1, 1, "", "PackageCombo"]], "sample_annotator.report_model.AnnotationMultiSampleReport": [[2, 2, 1, "", "all_outputs"], [2, 2, 1, "", "as_dataframe"], [2, 3, 1, "", "reports"]], "sample_annotator.report_model.AnnotationReport": [[2, 2, 1, "", "add_message"], [2, 3, 1, "", "annotation_sufficiency_score"], [2, 2, 1, "", "as_dataframe"], [2, 3, 1, "", "input"], [2, 2, 1, "", "max_severity"], [2, 3, 1, "", "messages"], [2, 2, 1, "", "messages_by_category"], [2, 3, 1, "", "output"], [2, 3, 1, "", "package"], [2, 2, 1, "", "passes"], [2, 3, 1, "", "sample_id"]], "sample_annotator.report_model.Category": [[2, 3, 1, "", "BadNull"], [2, 3, 1, "", "ControlledVocabulary"], [2, 3, 1, "", "Core"], [2, 3, 1, "", "Geo"], [2, 3, 1, "", "Identifier"], [2, 3, 1, "", "Inapplicable"], [2, 3, 1, "", "MeasurementSyntax"], [2, 3, 1, "", "MissingCore"], [2, 3, 1, "", "Unclassified"], [2, 3, 1, "", "Units"], [2, 3, 1, "", "UnknownField"], [2, 2, 1, "", "list"]], "sample_annotator.report_model.Message": [[2, 2, 1, "", "as_dict"], [2, 3, 1, "", "category"], [2, 3, 1, "", "description"], [2, 3, 1, "", "field"], [2, 3, 1, "", "severity"], [2, 3, 1, "", "was_repaired"]], "sample_annotator.report_model.PackageCombo": [[2, 3, 1, "", "checklist"], [2, 3, 1, "", "environmental_package"]], "sample_annotator.sample_annotator": [[2, 1, 1, "", "SampleAnnotator"]], "sample_annotator.sample_annotator.SampleAnnotator": [[2, 2, 1, "", 
"annotate"], [2, 2, 1, "", "annotate_all"], [2, 3, 1, "", "geoengine"], [2, 2, 1, "", "infer_package"], [2, 3, 1, "", "measurement_engine"], [2, 2, 1, "", "perform_geolocation_inference"], [2, 2, 1, "", "perform_inference"], [2, 2, 1, "", "perform_text_mining"], [2, 3, 1, "", "schema"], [2, 3, 1, "", "target_class"], [2, 2, 1, "", "tidy_enumerations"], [2, 2, 1, "", "tidy_keys"], [2, 2, 1, "", "tidy_measurements"], [2, 2, 1, "", "tidy_nulls"], [2, 2, 1, "", "validate_identifier"]], "sample_annotator.sample_utils": [[2, 4, 1, "", "create_tests"]]}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "attribute", "Python attribute"], "4": ["py", "function", "Python function"]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method", "3": "py:attribute", "4": "py:function"}, "terms": {"": 3, "0": [2, 3], "00000134": 3, "00000446": 3, "00000489": 3, "04": 3, "0472": 3, "1": [2, 3], "10": 3, "15": 3, "19": 3, "2": 3, "20": 3, "2m": 3, "3": 3, "3534": 3, "359": 3, "473000000": 3, "5": 3, "6": 2, "68": 3, "7": 3, "712p3d": 3, "A": 3, "For": 3, "If": 3, "It": 3, "No": 3, "The": 3, "These": 3, "_": 3, "accord": 3, "activ": [2, 3], "add_messag": [1, 2], "aggreg": 3, "alia": 3, "all": [2, 3], "all_output": [1, 2], "alt": 3, "am": 3, "an": [2, 3], "ani": 2, "annot": 2, "annotate_al": [1, 2], "annotation_sufficiency_scor": [1, 2], "annotationmultisamplereport": [1, 2], "annotationreport": [1, 2], "api": 0, "ar": 3, "arctic": 3, "arg": 2, "arrai": 3, "as_datafram": [1, 2], "as_dict": [1, 2], "aspect": 3, "assert": 2, "attempt": 3, "b": 3, "bad": 2, "badnul": [1, 2], "base": 2, "basic": 3, "below": 3, "best": 3, "between": [2, 3], "bin": 3, "biogeochem": 3, "biolog": 3, "biologi": 3, "bioport": 3, "biosampl": 3, "bool": 2, "both": 3, "c": 3, "carb": 3, "carbon": 3, "care": 3, "case": 2, "categori": [1, 2, 3], "cd": 3, "centuri": 3, "ch4": 3, "challeng": 3, "chang": 3, "check": 3, 
"checklist": [1, 2, 3], "class": 2, "classdefinit": 2, "climat": 3, "clone": 3, "co2": 3, "code": 3, "com": 3, "combo": 2, "come": 3, "command": 0, "commun": 3, "concis": 3, "config": 3, "contain": 3, "content": 1, "control": 2, "controlledvocabulari": [1, 2], "copi": 3, "core": [1, 2], "creat": [2, 3], "create_test": [1, 2], "current": 3, "cycl": 3, "data": 3, "datafram": 3, "default": 3, "depth": 3, "descript": [1, 2, 3], "detail": 3, "dict": [2, 3], "dictionari": [2, 3], "differ": 3, "dioxid": 3, "directli": 2, "doc": [0, 2], "dramat": 3, "driver": 3, "e": [2, 3], "each": [2, 3], "earth": 3, "ebi": 2, "ecosystem": 3, "ecosystem_categori": 3, "ecosystem_subtyp": 3, "ecosystem_typ": 3, "either": 2, "elev": 3, "en": 2, "ena": 2, "encod": 3, "enhanc": 3, "enum": 2, "enumer": [2, 3], "env": 3, "env_broad_scal": 3, "env_local_scal": 3, "env_medium": 3, "environ": 2, "environment": [2, 3], "environmental_packag": [1, 2], "envo": 3, "error": 2, "exampl": 3, "exit": 3, "export": 3, "fake": 3, "field": [1, 2, 3], "file": 3, "fine": 3, "flask": 3, "folder": 3, "found": 2, "from": [2, 3], "fundament": 3, "g": [2, 3], "gase": 3, "gb0108335": 3, "gene": 3, "gener": 3, "geo": [1, 2, 3], "geo_loc_nam": 3, "geoengin": [1, 2], "geoloc": 2, "git": 3, "global": 3, "glocal": 3, "gold": 3, "googl": 3, "googlemap": 3, "googlemaps_api_kei": 2, "grand": 3, "greenhous": 3, "gsc": 3, "habitat": 3, "has_numeric_valu": 3, "has_raw_valu": 3, "has_unit": 3, "have": 3, "help": 3, "how": 3, "html": 2, "http": [2, 3], "i": 0, "id": 3, "identifi": [1, 2, 3], "implic": 3, "inapplic": [1, 2], "includ": 2, "index": 0, "individu": 2, "infer": [2, 3], "infer_packag": [1, 2], "info": 3, "inform": [2, 3], "input": [1, 2, 3], "instal": 0, "int": 2, "integr": 3, "intend": 3, "intersect": 3, "invalid": 3, "io": 2, "json": 3, "just": 3, "kei": [2, 3], "kiruna": 3, "kwarg": 2, "lat": 3, "lat_lon": 3, "later": 3, "latest": 2, "latitud": 3, "learn": 2, "like": 3, "line": 0, "linkml": 3, "list": [1, 2], "ll": 
3, "locat": 3, "lon": 3, "longitud": 3, "m": 3, "machin": 2, "made": 2, "mai": 3, "make": 3, "manifest": 3, "map": [2, 3], "max_sever": [1, 2], "mean": 2, "measur": [2, 3], "measurement_engin": [1, 2], "measurementengin": 2, "measurementsyntax": [1, 2], "mediat": 3, "messag": [1, 2, 3], "messages_by_categori": [1, 2], "metadata": 3, "metagenom": 3, "meter": 3, "methan": 3, "metr": 3, "microbi": 3, "mine": [2, 3], "miss": [2, 3], "missingcor": [1, 2, 3], "mix": 3, "mixs5": 2, "mod_dat": 3, "modul": [0, 1], "more": 3, "multi": 2, "must": 3, "name": 3, "ncbi_taxonomy_nam": 3, "need": 3, "ner": 3, "nmdc": 0, "none": 2, "normal": [2, 3], "northern": 3, "null": 2, "object": [2, 3], "onli": 3, "ontodev": 3, "ontologi": 3, "option": 3, "organism": 3, "ornl": 3, "other": 2, "output": [1, 2, 3], "packag": [0, 1, 3], "packagecombo": [1, 2], "page": 0, "palsa": 3, "panda": 3, "parallel": 3, "pars": 3, "part": 3, "particul": 3, "pass": [1, 2], "path": 3, "perform": [2, 3], "perform_geolocation_infer": [1, 2], "perform_infer": [1, 2], "perform_text_min": [1, 2], "permafrost": 3, "pip": 3, "pipenv_ignore_virtualenv": 3, "plu": 2, "predict": 3, "process": 3, "produc": 3, "python": 3, "python3": 3, "quantityvalu": 3, "quantulum": 3, "r": 3, "rang": 3, "readthedoc": [2, 3], "render": 3, "repair": [2, 3], "report": [1, 2], "report_model": 1, "repres": 3, "represent": 2, "requir": 3, "respond": 3, "return": 2, "rewrit": 3, "run": 3, "sampl": 2, "sample_annot": [0, 1, 3], "sample_collection_sit": 3, "sample_id": [1, 2], "sample_util": 1, "sampleannot": [1, 2], "samplefil": 3, "sampleschema": 2, "scale": 3, "schema": [0, 1, 2], "scienc": 3, "score": 3, "search": 0, "see": 3, "semi": 3, "sequenti": 2, "seri": 2, "set": 2, "sever": [1, 2, 3], "show": 3, "shrinkag": 3, "simpl": 3, "singl": 2, "skip": 3, "slot": 2, "soil": 3, "someth": 3, "sourc": 3, "specifi": 3, "specific_ecosystem": 3, "standard": 2, "start": 0, "static": 2, "statist": 3, "str": 2, "structur": 3, "studi": [2, 3], 
"study_descript": 3, "submit": 2, "submodul": 1, "suffici": 3, "sweden": 3, "syntax": 2, "system": 3, "take": [2, 3], "target_class": [1, 2], "terrestri": 3, "test": [0, 2], "text": [2, 3], "thaw": 3, "thi": [2, 3], "those": 3, "through": 3, "tidi": [2, 3], "tidy_enumer": [1, 2], "tidy_kei": [1, 2], "tidy_measur": [1, 2], "tidy_nul": [1, 2], "todo": [2, 3], "togeth": 3, "tot": 3, "total": 3, "transform": [2, 3], "true": 3, "tsv": 3, "tupl": 2, "txt": 3, "type": 3, "unclassifi": [1, 2, 3], "underscor": 3, "understand": 3, "unit": [1, 2, 3], "unknown": 2, "unknownfield": [1, 2, 3], "untidi": 3, "unverstand": 3, "us": [2, 3], "usag": 3, "v": [2, 3], "valid": 0, "validate_identifi": [1, 2], "validateonli": 3, "valu": [2, 3], "vocabulari": 2, "wa": 3, "wai": 3, "warn": 2, "was_repair": [1, 2], "web": 0, "wetland": 3, "what": 0, "where": 3, "which": 3, "while": 3, "work": 3, "write": 3, "x": 3, "yaml": 3, "you": 3}, "titles": ["Welcome to sample-annotator\u2019s documentation!", "sample-annotator", "sample_annotator package", "NMDC Sample Annotator API"], "titleterms": {"": 0, "annot": [0, 1, 3], "api": 3, "command": 3, "content": [0, 2], "doc": 3, "document": 0, "i": 3, "indic": 0, "instal": 3, "line": 3, "modul": [2, 3], "nmdc": 3, "packag": 2, "pipenv": 3, "report": 3, "report_model": 2, "sampl": [0, 1, 3], "sample_annot": 2, "sample_util": 2, "schema": 3, "start": 3, "submodul": 2, "tabl": 0, "test": 3, "valid": 3, "venv": 3, "web": 3, "welcom": 0, "what": 3}})
\ No newline at end of file
diff --git a/docs/static/intro.html b/docs/static/intro.html
new file mode 100644
index 0000000..c2f5671
--- /dev/null
+++ b/docs/static/intro.html
@@ -0,0 +1,360 @@
+
+
+
+
+
+
+
+
+ NMDC Sample Annotator API — sample-annotator 0.0.1 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ sample-annotator
+
+
+
+
+
+
+
+
+
+NMDC Sample Annotator API
+
+Installing
+
+pipenv
+This requires python 3.7.x or later (as default python).
+If you have pipenv installed:
+ git clone ...
+cd sample-annotator
+make test
+
+
+
+
+venv
+For those using venv, you’ll need something like:
+ git clone ...
+cd sample-annotator
+python3.7 -m venv env
+source ./env/bin/activate
+pip install pipenv
+PIPENV_IGNORE_VIRTUALENVS=1 make test
+
+
+While there may be more concise ways of running commands like those below, this works:
+PIPENV_IGNORE_VIRTUALENVS=1 pipenv run python -m sample_annotator.sample_annotator -R examples/report.tsv examples/gold.json
+
+
+
+
+
+What is it?
+This is a python and flask API for performing annotation of samples from semi-structured or untidy data
+The API takes as input a JSON object or dictionary representing a simple sample, where each key is a metadata field
+It will attempt to tidy and infer missing data according to a specified schema (currently MIxS)
+
+
+Command Line
+ pipenv run annotate-sample --help
+
+Usage: annotate-samples [OPTIONS] SAMPLEFILE
+
+ Annotate a file of samples, producing a "repaired" /enhanced sample file as
+ output, together with a report
+
+ The input file must be a JSON file containing an array of dicts
+
+Options:
+ -v, --validateonly / -g, --generate
+ Just validate / generate output ( default:
+ generate)
+
+ -s, --output TEXT JSON for tidied samples
+ -R, --report-file TEXT report file
+ -G, --googlemaps-api-key-path TEXT
+ path to file containing google maps API KEY
+ -B, --bioportal-api-key-path TEXT
+ path to file containing bioportal API KEY
+ --help Show this message and exit.
+
+
+E.g.
+ pipenv run annotate-sample -G config/googlemaps-api-key.txt -R examples/report.tsv examples/gold.json
+
+
+This will transform input such as:
+[
+ {
+ "id" : "gold:Gb0108335" ,
+ "community" : "microbial communities" ,
+ "depth" : "0.0 m" ,
+ "ecosystem" : "Environmental" ,
+ "ecosystem_category" : "Terrestrial" ,
+ "ecosystem_subtype" : "Wetlands" ,
+ "ecosystem_type" : "Soil" ,
+ "env_broad_scale" : "ENVO:00000446" ,
+ "env_local_scale" : "ENVO:00000489" ,
+ "env_medium" : "ENVO:00000134" ,
+ "geo_loc_name" : "Sweden: Kiruna" ,
+ "habitat" : "Thawing permafrost" ,
+ "identifier" : "studying carbon transformations" ,
+ "lat_lon" : "68.3534 19.0472" ,
+ "location" : "from the Arctic" ,
+ "mod_date" : "15-MAY-20 10.04.19.473000000 AM" ,
+ "name" : "Thawing permafrost microbial communities from the Arctic, studying carbon transformations - Permafrost 712P3D" ,
+ "ncbi_taxonomy_name" : "permafrost metagenome" ,
+ "sample_collection_site" : "Palsa" ,
+ "specific_ecosystem" : "Permafrost" ,
+ "study_description" : "A fundamental challenge of microbial environmental science is to understand how earth systems will respond to climate change. A parallel challenge in biology is to unverstand how information encoded in organismal genes manifests as biogeochemical processes at ecosystem-to-global scales. These grand challenges intersect in the need to understand the glocal carbon (C) cycle, which is both mediated by biological processes and a key driver of climate through the greenhouse gases carbon dioxide (CO2) and methane (CH4). A key aspect of these challenges is the C cycle implications of the predicted dramatic shrinkage in northern permafrost in the coming century." ,
+ "type" : "nmdc:Biosample"
+ },
+
+
+into:
+[
+ {
+ "id" : "gold:Gb0108335" ,
+ "community" : "microbial communities" ,
+ "depth" : {
+ "has_numeric_value" : 0.0 ,
+ "has_raw_value" : "0.0 m" ,
+ "has_unit" : "metre"
+ },
+ "ecosystem" : "Environmental" ,
+ "ecosystem_category" : "Terrestrial" ,
+ "ecosystem_subtype" : "Wetlands" ,
+ "ecosystem_type" : "Soil" ,
+ "elev" : {
+ "has_numeric_value" : 359 ,
+ "has_unit" : "meter"
+ },
+ "env_broad_scale" : "ENVO:00000446" ,
+ "env_local_scale" : "ENVO:00000489" ,
+ "env_medium" : "ENVO:00000134" ,
+ "geo_loc_name" : "Sweden: Kiruna" ,
+ "habitat" : "Thawing permafrost" ,
+ "identifier" : "studying carbon transformations" ,
+ "lat_lon" : {
+ "latitude" : 68.3534 ,
+ "longitude" : 19.0472
+ },
+ "location" : "from the Arctic" ,
+ "mod_date" : "15-MAY-20 10.04.19.473000000 AM" ,
+ "name" : "Thawing permafrost microbial communities from the Arctic, studying carbon transformations - Permafrost 712P3D" ,
+ "ncbi_taxonomy_name" : "permafrost metagenome" ,
+ "sample_collection_site" : "Palsa" ,
+ "specific_ecosystem" : "Permafrost" ,
+ "study_description" : "A fundamental challenge of microbial environmental science is to understand how earth systems will respond to climate change. A parallel challenge in biology is to unverstand how information encoded in organismal genes manifests as biogeochemical processes at ecosystem-to-global scales. These grand challenges intersect in the need to understand the glocal carbon (C) cycle, which is both mediated by biological processes and a key driver of climate through the greenhouse gases carbon dioxide (CO2) and methane (CH4). A key aspect of these challenges is the C cycle implications of the predicted dramatic shrinkage in northern permafrost in the coming century." ,
+
+
+Differences between input and output:
+
+measurement fields are normalized
+information inferred from lat_lon (currently only elev )
+TODO: ENVO from text mining
+TODO: annotation sufficiency score
+TODO: more…
+
+
+Validation reports
+These are created as report objects, and exported to pandas dataframes for basic statistical aggregation.
+See tests for details
+Example report:
+|description|severity|field|was_repaired|category|
+|---|---|---|---|---|
+|No package specified|1|||Category.MissingCore|
+|No checklist specified|1|||Category.Unclassified|
+|Key not underscored: total particulate carbon|1||True|Category.Unclassified|
+|Invalid field: id|1|||Category.UnknownField|
+|Alias used: total_particulate_carbon => tot_part_carb|1||True|Category.Unclassified|
+|Parsed unit-value: 2.0 metre|1|||Category.Unclassified|
+|Missing unit 5|1|||Category.Unclassified|
+|Skipping geo-checks|0|||Category.Unclassified|
+
+
+
+API Docs
+TODO: readthedocs
+
+
+Testing
+Currently the best way to understand this code is to understand the tests
+
+This contains ‘fake’ samples that are intended to test validation and repair
+
+
+Schema Validation
+See the schema folder –
+this contains a copy of the LinkML rendering of the MIxS schema
+from mixs-source which will later be integrated by GSC
+
+
+Modules
+
+Each module will take care of different aspects
+For example, the measurement module will normalized all fields in the schema with range QuantityValue
+E.g. Input:
+sample :
+ id : TEST:1
+ alt : 2m
+ ...
+
+
+Repair Output:
+sample :
+ id : TEST:1
+ alt :
+ has_numeric_value : 2.0
+ has_raw_value : 2m
+ has_unit : metre
+ ...
+
+
+
+
+Starting the web API
+
+TODO: write flask code
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file