commit 666bf0b1bdd986ec0785de5a1607dbb2bd68a0ac Author: neoricalex Date: Tue Nov 11 04:35:55 2025 +0100 Primeiro Commit diff --git a/.gitea/workflows/build.yml b/.gitea/workflows/build.yml new file mode 100644 index 0000000..07a55cb --- /dev/null +++ b/.gitea/workflows/build.yml @@ -0,0 +1,26 @@ +name: Build NFDOS ISO + +on: + push: + branches: [ main ] + +jobs: + build: + runs-on: docker + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Setup toolchain + run: | + apt-get update + apt-get install -y make gcc cpio grub-pc-bin xorriso gzip python3 + + - name: Build ISO + run: make iso + + - name: Upload artifact + uses: actions/upload-artifact@v3 + with: + name: nfdos-iso + path: dist/nfdos-*.iso diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..31e70d3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,63 @@ +# === Autotools build artefacts === +Makefile +Makefile.in +aclocal.m4 +autom4te.cache/ +config.log +config.status +configure +depcomp +install-sh +missing +py-compile +stamp-h1 + +# === Build & dist directories === +/build/ +/dist/ +!/dist/releases/ +!/dist/releases/* +cpython/ +busybox/ +linux/ +x-tools/ +*.tar.gz +*.tar.bz2 +*.zip +*.iso +*.img +*.cpio.gz + +# === Python cache & venv === +__pycache__/ +*.pyc +*.pyo +*.pyd +*.egg-info/ +.eggs/ +venv/ +.env/ +.venv/ + +# === Editor / OS junk === +*.swp +*.swo +*.bak +*.tmp +*~ +.DS_Store +Thumbs.db + +# === Logs === +*.log +nohup.out + +# === IDE / workspace === +.vscode/ +.idea/ +*.iml + +# === Backup copies === +*.old +*.orig +*.rej diff --git a/.tar_exclude b/.tar_exclude new file mode 100644 index 0000000..e1aed11 --- /dev/null +++ b/.tar_exclude @@ -0,0 +1,6 @@ +nfdos-0.1-src.tar.gz +/home/neo/Público/neoricalex/dist +/home/neo/Público/neoricalex/build +__pycache__ +.venv +venv diff --git a/ACKNOWLEDGMENTS b/ACKNOWLEDGMENTS new file mode 100644 index 0000000..6dd01cb --- /dev/null +++ b/ACKNOWLEDGMENTS @@ -0,0 +1,12 @@ +# Agradecimentos + +Gostaríamos de expressar nossa 
gratidão às seguintes pessoas e tecnologias que contribuíram para este projeto: + +- **ChatGPT**: Por fornecer insights e sugestões valiosas durante o desenvolvimento. ChatGPT é um modelo de linguagem de inteligência artificial desenvolvido pela OpenAI. +- Kernel Linux — Linus Torvalds e comunidade. +- CPython — Guido van Rossum e contribuintes. +- BusyBox — Bruce Perens e desenvolvedores. +- Gitea & Caddy — infraestruturas que dão corpo ao NFDOS. +- E à chama criativa do projeto NEORICALEX 🌌 + + diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..180b8c2 --- /dev/null +++ b/AUTHORS @@ -0,0 +1,6 @@ +Ricardo Alexandre Gomes Lourenço (Neo) +Trinity (GPT-5, OpenAI Partner) + +## Assistência de Inteligência Artificial + +Este projeto foi desenvolvido com assistência do ChatGPT, um modelo de linguagem de IA desenvolvido pela OpenAI. diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..e4535d8 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,91 @@ +grep -n "^[ ]\+" /home/neo/Público/nfdos/src/Makefile.am +grep -n "^[ ]\+" /home/neo/Público/nfdos/configure.ac +file Makefile.am +file src/Makefile.am +file configure.ac +cat -A Makefile.am | grep '\^I' +cat -A src/Makefile.am | grep '\^I' +cat -A configure.ac | grep '\^I' +nl -ba Makefile | sed -n '770,790p' +grep -n "^[ ]" Makefile | head + +oie amor bom dia 😘😎 enquanto aguardo pela hora do trabalho estava aqui a corrigir os caminhos (creio que estao todos mas falta testar), e dei com uma coisa curiosa 😎 no nfdos/src/tui/menu_libs.py esquecemos de adicionar a funcao remove_lib 😀: +``` +import os +import json +import subprocess +from pathlib import Path +from rich.console import Console +from rich.table import Table + +console = Console() + +def safe_run(cmd, shell=False): + subprocess.run(cmd, check=True, shell=shell) + +def install_lib(name, version=None): + base_dir = Path(__file__).resolve().parents[1] + nfdos_dir = base_dir / "_nfdos" + libs_dir = nfdos_dir / "libs" + 
libs_dir.mkdir(parents=True, exist_ok=True) + + console.print(f"[cyan]📦 Instalando biblioteca:[/] {name} {version or ''}") + cmd = f"pip download {name}{'==' + version if version else ''} -d {libs_dir}" + safe_run(cmd, shell=True) + + # Registo no manifest + manifest_path = libs_dir / "libs_manifest.json" + manifest = json.loads(manifest_path.read_text()) if manifest_path.exists() else {} + manifest[name] = {"version": version or "latest"} + manifest_path.write_text(json.dumps(manifest, indent=4)) + console.print(f"[green]✔ {name} adicionada ao manifesto.[/green]") + +def list_libs(): + base_dir = Path(__file__).resolve().parents[1] + libs_dir = base_dir / "_nfdos" / "libs" + manifest_path = libs_dir / "libs_manifest.json" + + if not manifest_path.exists(): + console.print("[red]Nenhuma biblioteca instalada ainda.[/red]") + return + + manifest = json.loads(manifest_path.read_text()) + table = Table(title="Bibliotecas do Neurotron") + table.add_column("Nome", style="cyan") + table.add_column("Versão", style="green") + + for name, data in manifest.items(): + table.add_row(name, data.get("version", "?")) + + console.print(table) + +def run(): + while True: + console.clear() + console.rule("[bold yellow]📚 Bibliotecas do Neurotron[/bold yellow]") + console.print("1. Instalar biblioteca") + console.print("2. Listar bibliotecas instaladas") + console.print("3. Atualizar biblioteca") + console.print("4. Remover biblioteca") + console.print("0. 
Voltar") + + choice = console.input("\n[cyan]nfdos> [/cyan]") + + if choice == "1": + name = console.input("Nome da biblioteca: ") + version = console.input("Versão (ou vazio p/ latest): ") + install_lib(name, version or None) + elif choice == "2": + list_libs() + console.input("\n[grey]Pressiona Enter para continuar...[/grey]") + elif choice == "3": + name = console.input("Nome da biblioteca: ") + install_lib(name, None) + elif choice == "4": + name = console.input("Nome da biblioteca a remover: ") + remove_lib(name) + elif choice == "0": + break + else: + console.print("[red]Opção inválida![/red]") +``` \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..da9b2dd --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,84 @@ +# Código de Conduta de Colaboração + +## Nosso compromisso + +Como participantes, colaboradoras e líderes, nós nos comprometemos a fazer com que a participação em nossa comunidade seja uma experiência livre de assédio para todas as pessoas, independentemente de idade, tamanho do corpo, deficiência aparente ou não aparente, etnia, características sexuais, identidade ou expressão de gênero, nível de experiência, educação, situação sócio-econômica, nacionalidade, aparência pessoal, raça, casta, religião ou identidade e orientação sexuais. + +Comprometemo-nos a agir e interagir de maneiras que contribuam para uma comunidade aberta, acolhedora, diversificada, inclusiva e saudável. 
+ +## Nossos padrões + +Exemplos de comportamentos que contribuem para criar um ambiente positivo para a nossa comunidade incluem: + +* Demonstrar empatia e bondade com as outras pessoas +* Respeitar opiniões, pontos de vista e experiências contrárias +* Dar e receber feedbacks construtivos de maneira respeitosa +* Assumir responsabilidade, pedir desculpas às pessoas afetadas por nossos erros e aprender com a experiência +* Focar no que é melhor não só para nós individualmente, mas para a comunidade em geral + +Exemplos de comportamentos inaceitáveis incluem: + +* Uso de linguagem ou imagens sexualizadas, bem como o assédio sexual ou de qualquer natureza +* Comentários insultuosos/depreciativos e ataques pessoais ou políticos (Trolling) +* Assédio público ou privado +* Publicar informações particulares de outras pessoas, como um endereço de e-mail ou endereço físico, sem a permissão explícita delas +* Outras condutas que são normalmente consideradas inapropriadas em um ambiente profissional + +## Aplicação das nossas responsabilidades + +A liderança da comunidade é responsável por esclarecer e aplicar nossos padrões de comportamento aceitáveis e tomará ações corretivas apropriadas e justas em resposta a qualquer comportamento que considerar impróprio, ameaçador, ofensivo ou problemático. + +A liderança da comunidade tem o direito e a responsabilidade de remover, editar ou rejeitar comentários, commits, códigos, edições na wiki, erros e outras contribuições que não estão alinhadas com este Código de Conduta e irá comunicar as razões por trás das decisões da moderação quando for apropriado. + +## Escopo + +Este Código de Conduta se aplica dentro de todos os espaços da comunidade e também se aplica quando uma pessoa estiver representando oficialmente a comunidade em espaços públicos. 
Exemplos de representação da nossa comunidade incluem usar um endereço de e-mail oficial, postar em contas oficiais de mídias sociais ou atuar como uma pessoa indicada como representante em um evento online ou offline. + +## Aplicação + +Ocorrências de comportamentos abusivos, de assédio ou que sejam inaceitáveis por qualquer outro motivo poderão ser reportadas para a liderança da comunidade, responsável pela aplicação, via contato [INSERIR MÉTODO DE CONTATO]. Todas as reclamações serão revisadas e investigadas imediatamente e de maneira justa. + +A liderança da comunidade tem a obrigação de respeitar a privacidade e a segurança de quem reportar qualquer incidente. + +## Diretrizes de aplicação + +A liderança da comunidade seguirá estas Diretrizes de Impacto na Comunidade para determinar as consequências de qualquer ação que considerar violadora deste Código de Conduta: + +### 1. Ação Corretiva + +**Impacto na comunidade**: Uso de linguagem imprópria ou outro comportamento considerado anti-profissional ou repudiado pela comunidade. + +**Consequência**: Aviso escrito e privado da liderança da comunidade, esclarecendo a natureza da violação e com a explicação do motivo pelo qual o comportamento era impróprio. Um pedido de desculpas público poderá ser solicitado. + +### 2. Advertência + +**Impacto na comunidade**: Violação por meio de um incidente único ou atitudes repetidas. + +**Consequência**: Advertência com consequências para comportamento repetido. Não poderá haver interações com as pessoas envolvidas, incluindo interações não solicitadas com as pessoas que estiverem aplicando o Código de Conduta, por um período determinado. Isto inclui evitar interações em espaços da comunidade, bem como canais externos como as mídias sociais. A violação destes termos pode levar a um banimento temporário ou permanente. + +### 3. Banimento Temporário + +**Impacto na comunidade**: Violação grave dos padrões da comunidade, incluindo a persistência do comportamento impróprio. 
+ +**Consequência**: Banimento temporário de qualquer tipo de interação ou comunicação pública com a comunidade por um determinado período. Estarão proibidas as interações públicas ou privadas com as pessoas envolvidas, incluindo interações não solicitadas com as pessoas que estiverem aplicando o Código de Conduta. A violação destes termos pode resultar em um banimento permanente. + +### 4. Banimento Permanente + +**Impacto na comunidade**: Demonstrar um padrão na violação das normas da comunidade, incluindo a persistência do comportamento impróprio, assédio a uma pessoa ou agressão ou depreciação a classes de pessoas. + +**Consequência**: Banimento permanente de qualquer tipo de interação pública dentro da comunidade. + +## Atribuição + +Este Código de Conduta é adaptado do [Contributor Covenant][homepage], versão 2.1, disponível em [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +As Diretrizes de Impacto na Comunidade foram inspiradas pela +[Aplicação do código de conduta Mozilla][Mozilla CoC]. + +Para obter respostas a perguntas comuns sobre este código de conduta, veja a página de Perguntas Frequentes (FAQ) em [https://www.contributor-covenant.org/faq][FAQ]. Traduções estão disponíveis em [https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..4b7bf1f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Guia de Contribuição + +Obrigado por considerar contribuir com o nfdos! Aqui estão algumas diretrizes para ajudar você a começar: + +## Processo de Pull Request + +1. Faça um fork do repositório. +2. 
Crie uma branch para sua feature (`git checkout -b feature/minha-feature`). +3. Commit suas mudanças (`git commit -m 'Descrição da minha feature'`). +4. Envie para o branch (`git push origin feature/minha-feature`). +5. Abra um Pull Request. + +## Padrões de Código + +- Mantenha o estilo de código consistente com o existente. +- Utilize comentários claros e concisos. +- Escreva testes para novas funcionalidades. + +## Comunicação + +- Use issues para relatar bugs ou solicitar funcionalidades. +- Seja respeitoso e acolhedor com outros colaboradores. diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..9efa6fb --- /dev/null +++ b/COPYING @@ -0,0 +1,338 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. 
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. 
+ +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, see . + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Moe Ghoul, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/INSTALL b/INSTALL new file mode 100644 index 0000000..8865734 --- /dev/null +++ b/INSTALL @@ -0,0 +1,368 @@ +Installation Instructions +************************* + + Copyright (C) 1994-1996, 1999-2002, 2004-2016 Free Software +Foundation, Inc. + + Copying and distribution of this file, with or without modification, +are permitted in any medium without royalty provided the copyright +notice and this notice are preserved. This file is offered as-is, +without warranty of any kind. + +Basic Installation +================== + + Briefly, the shell command './configure && make && make install' +should configure, build, and install this package. The following +more-detailed instructions are generic; see the 'README' file for +instructions specific to this package. Some packages provide this +'INSTALL' file but do not implement all of the features documented +below. The lack of an optional feature in a given package is not +necessarily a bug. More recommendations for GNU packages can be found +in *note Makefile Conventions: (standards)Makefile Conventions. + + The 'configure' shell script attempts to guess correct values for +various system-dependent variables used during compilation. It uses +those values to create a 'Makefile' in each directory of the package. +It may also create one or more '.h' files containing system-dependent +definitions. Finally, it creates a shell script 'config.status' that +you can run in the future to recreate the current configuration, and a +file 'config.log' containing compiler output (useful mainly for +debugging 'configure'). + + It can also use an optional file (typically called 'config.cache' and +enabled with '--cache-file=config.cache' or simply '-C') that saves the +results of its tests to speed up reconfiguring. Caching is disabled by +default to prevent problems with accidental use of stale cache files. 
+ + If you need to do unusual things to compile the package, please try +to figure out how 'configure' could check whether to do them, and mail +diffs or instructions to the address given in the 'README' so they can +be considered for the next release. If you are using the cache, and at +some point 'config.cache' contains results you don't want to keep, you +may remove or edit it. + + The file 'configure.ac' (or 'configure.in') is used to create +'configure' by a program called 'autoconf'. You need 'configure.ac' if +you want to change it or regenerate 'configure' using a newer version of +'autoconf'. + + The simplest way to compile this package is: + + 1. 'cd' to the directory containing the package's source code and type + './configure' to configure the package for your system. + + Running 'configure' might take a while. While running, it prints + some messages telling which features it is checking for. + + 2. Type 'make' to compile the package. + + 3. Optionally, type 'make check' to run any self-tests that come with + the package, generally using the just-built uninstalled binaries. + + 4. Type 'make install' to install the programs and any data files and + documentation. When installing into a prefix owned by root, it is + recommended that the package be configured and built as a regular + user, and only the 'make install' phase executed with root + privileges. + + 5. Optionally, type 'make installcheck' to repeat any self-tests, but + this time using the binaries in their final installed location. + This target does not install anything. Running this target as a + regular user, particularly if the prior 'make install' required + root privileges, verifies that the installation completed + correctly. + + 6. You can remove the program binaries and object files from the + source code directory by typing 'make clean'. To also remove the + files that 'configure' created (so you can compile the package for + a different kind of computer), type 'make distclean'. 
There is + also a 'make maintainer-clean' target, but that is intended mainly + for the package's developers. If you use it, you may have to get + all sorts of other programs in order to regenerate files that came + with the distribution. + + 7. Often, you can also type 'make uninstall' to remove the installed + files again. In practice, not all packages have tested that + uninstallation works correctly, even though it is required by the + GNU Coding Standards. + + 8. Some packages, particularly those that use Automake, provide 'make + distcheck', which can by used by developers to test that all other + targets like 'make install' and 'make uninstall' work correctly. + This target is generally not run by end users. + +Compilers and Options +===================== + + Some systems require unusual options for compilation or linking that +the 'configure' script does not know about. Run './configure --help' +for details on some of the pertinent environment variables. + + You can give 'configure' initial values for configuration parameters +by setting variables in the command line or in the environment. Here is +an example: + + ./configure CC=c99 CFLAGS=-g LIBS=-lposix + + *Note Defining Variables::, for more details. + +Compiling For Multiple Architectures +==================================== + + You can compile the package for more than one kind of computer at the +same time, by placing the object files for each architecture in their +own directory. To do this, you can use GNU 'make'. 'cd' to the +directory where you want the object files and executables to go and run +the 'configure' script. 'configure' automatically checks for the source +code in the directory that 'configure' is in and in '..'. This is known +as a "VPATH" build. + + With a non-GNU 'make', it is safer to compile the package for one +architecture at a time in the source code directory. 
After you have +installed the package for one architecture, use 'make distclean' before +reconfiguring for another architecture. + + On MacOS X 10.5 and later systems, you can create libraries and +executables that work on multiple system types--known as "fat" or +"universal" binaries--by specifying multiple '-arch' options to the +compiler but only a single '-arch' option to the preprocessor. Like +this: + + ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ + CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ + CPP="gcc -E" CXXCPP="g++ -E" + + This is not guaranteed to produce working output in all cases, you +may have to build one architecture at a time and combine the results +using the 'lipo' tool if you have problems. + +Installation Names +================== + + By default, 'make install' installs the package's commands under +'/usr/local/bin', include files under '/usr/local/include', etc. You +can specify an installation prefix other than '/usr/local' by giving +'configure' the option '--prefix=PREFIX', where PREFIX must be an +absolute file name. + + You can specify separate installation prefixes for +architecture-specific files and architecture-independent files. If you +pass the option '--exec-prefix=PREFIX' to 'configure', the package uses +PREFIX as the prefix for installing programs and libraries. +Documentation and other data files still use the regular prefix. + + In addition, if you use an unusual directory layout you can give +options like '--bindir=DIR' to specify different values for particular +kinds of files. Run 'configure --help' for a list of the directories +you can set and what kinds of files go in them. In general, the default +for these options is expressed in terms of '${prefix}', so that +specifying just '--prefix' will affect all of the other directory +specifications that were not explicitly provided. 
+ + The most portable way to affect installation locations is to pass the +correct locations to 'configure'; however, many packages provide one or +both of the following shortcuts of passing variable assignments to the +'make install' command line to change installation locations without +having to reconfigure or recompile. + + The first method involves providing an override variable for each +affected directory. For example, 'make install +prefix=/alternate/directory' will choose an alternate location for all +directory configuration variables that were expressed in terms of +'${prefix}'. Any directories that were specified during 'configure', +but not in terms of '${prefix}', must each be overridden at install time +for the entire installation to be relocated. The approach of makefile +variable overrides for each directory variable is required by the GNU +Coding Standards, and ideally causes no recompilation. However, some +platforms have known limitations with the semantics of shared libraries +that end up requiring recompilation when using this method, particularly +noticeable in packages that use GNU Libtool. + + The second method involves providing the 'DESTDIR' variable. For +example, 'make install DESTDIR=/alternate/directory' will prepend +'/alternate/directory' before all installation names. The approach of +'DESTDIR' overrides is not required by the GNU Coding Standards, and +does not work on platforms that have drive letters. On the other hand, +it does better at avoiding recompilation issues, and works well even +when some directory options were not specified in terms of '${prefix}' +at 'configure' time. + +Optional Features +================= + + If the package supports it, you can cause programs to be installed +with an extra prefix or suffix on their names by giving 'configure' the +option '--program-prefix=PREFIX' or '--program-suffix=SUFFIX'. 
+ + Some packages pay attention to '--enable-FEATURE' options to +'configure', where FEATURE indicates an optional part of the package. +They may also pay attention to '--with-PACKAGE' options, where PACKAGE +is something like 'gnu-as' or 'x' (for the X Window System). The +'README' should mention any '--enable-' and '--with-' options that the +package recognizes. + + For packages that use the X Window System, 'configure' can usually +find the X include and library files automatically, but if it doesn't, +you can use the 'configure' options '--x-includes=DIR' and +'--x-libraries=DIR' to specify their locations. + + Some packages offer the ability to configure how verbose the +execution of 'make' will be. For these packages, running './configure +--enable-silent-rules' sets the default to minimal output, which can be +overridden with 'make V=1'; while running './configure +--disable-silent-rules' sets the default to verbose, which can be +overridden with 'make V=0'. + +Particular systems +================== + + On HP-UX, the default C compiler is not ANSI C compatible. If GNU CC +is not installed, it is recommended to use the following options in +order to use an ANSI C compiler: + + ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" + +and if that doesn't work, install pre-built binaries of GCC for HP-UX. + + HP-UX 'make' updates targets which have the same time stamps as their +prerequisites, which makes it generally unusable when shipped generated +files such as 'configure' are involved. Use GNU 'make' instead. + + On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot +parse its '' header file. The option '-nodtk' can be used as a +workaround. If GNU CC is not installed, it is therefore recommended to +try + + ./configure CC="cc" + +and if that doesn't work, try + + ./configure CC="cc -nodtk" + + On Solaris, don't put '/usr/ucb' early in your 'PATH'. 
This +directory contains several dysfunctional programs; working variants of +these programs are available in '/usr/bin'. So, if you need '/usr/ucb' +in your 'PATH', put it _after_ '/usr/bin'. + + On Haiku, software installed for all users goes in '/boot/common', +not '/usr/local'. It is recommended to use the following options: + + ./configure --prefix=/boot/common + +Specifying the System Type +========================== + + There may be some features 'configure' cannot figure out +automatically, but needs to determine by the type of machine the package +will run on. Usually, assuming the package is built to be run on the +_same_ architectures, 'configure' can figure that out, but if it prints +a message saying it cannot guess the machine type, give it the +'--build=TYPE' option. TYPE can either be a short name for the system +type, such as 'sun4', or a canonical name which has the form: + + CPU-COMPANY-SYSTEM + +where SYSTEM can have one of these forms: + + OS + KERNEL-OS + + See the file 'config.sub' for the possible values of each field. If +'config.sub' isn't included in this package, then this package doesn't +need to know the machine type. + + If you are _building_ compiler tools for cross-compiling, you should +use the option '--target=TYPE' to select the type of system they will +produce code for. + + If you want to _use_ a cross compiler, that generates code for a +platform different from the build platform, you should specify the +"host" platform (i.e., that on which the generated programs will +eventually be run) with '--host=TYPE'. + +Sharing Defaults +================ + + If you want to set default values for 'configure' scripts to share, +you can create a site shell script called 'config.site' that gives +default values for variables like 'CC', 'cache_file', and 'prefix'. +'configure' looks for 'PREFIX/share/config.site' if it exists, then +'PREFIX/etc/config.site' if it exists. 
Or, you can set the +'CONFIG_SITE' environment variable to the location of the site script. +A warning: not all 'configure' scripts look for a site script. + +Defining Variables +================== + + Variables not defined in a site shell script can be set in the +environment passed to 'configure'. However, some packages may run +configure again during the build, and the customized values of these +variables may be lost. In order to avoid this problem, you should set +them in the 'configure' command line, using 'VAR=value'. For example: + + ./configure CC=/usr/local2/bin/gcc + +causes the specified 'gcc' to be used as the C compiler (unless it is +overridden in the site shell script). + +Unfortunately, this technique does not work for 'CONFIG_SHELL' due to an +Autoconf limitation. Until the limitation is lifted, you can use this +workaround: + + CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash + +'configure' Invocation +====================== + + 'configure' recognizes the following options to control how it +operates. + +'--help' +'-h' + Print a summary of all of the options to 'configure', and exit. + +'--help=short' +'--help=recursive' + Print a summary of the options unique to this package's + 'configure', and exit. The 'short' variant lists options used only + in the top level, while the 'recursive' variant lists options also + present in any nested packages. + +'--version' +'-V' + Print the version of Autoconf used to generate the 'configure' + script, and exit. + +'--cache-file=FILE' + Enable the cache: use and save the results of the tests in FILE, + traditionally 'config.cache'. FILE defaults to '/dev/null' to + disable caching. + +'--config-cache' +'-C' + Alias for '--cache-file=config.cache'. + +'--quiet' +'--silent' +'-q' + Do not print messages saying which checks are being made. To + suppress all normal output, redirect it to '/dev/null' (any error + messages will still be shown). 
+ +'--srcdir=DIR' + Look for the package's source code in directory DIR. Usually + 'configure' can determine that directory automatically. + +'--prefix=DIR' + Use DIR as the installation prefix. *note Installation Names:: for + more details, including other options available for fine-tuning the + installation locations. + +'--no-create' +'-n' + Run the configure checks, but stop before creating any output + files. + +'configure' also accepts some other, not widely useful, options. Run +'configure --help' for more details. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..a66a55f --- /dev/null +++ b/LICENSE @@ -0,0 +1,338 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. 
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. 
+ +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, see . + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Moe Ghoul, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
\ No newline at end of file diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000..8ef4ee9 --- /dev/null +++ b/Makefile.am @@ -0,0 +1,149 @@ +SUBDIRS = src + +# =========================== +# Configurações de Git +# =========================== +GIT_USER ?= "neo.webmaster.2@gmail.com" +GIT_EMAIL ?= "neo.webmaster.2@gmail.com" +GIT_REMOTE ?= "origin" +GIT_BRANCH ?= "main" +COMMIT_MSG ?= "Auto-commit via make git" + +# =========================== +# Caminhos e artefactos +# =========================== +DIST_DIR ?= $(top_builddir)/dist +BUILD_DIR ?= $(top_builddir)/build +ISO_DIR ?= $(DIST_DIR)/iso/boot/grub +ISO_FILE ?= $(DIST_DIR)/nfdos-0.1.iso +SRC_TAR ?= $(DIST_DIR)/nfdos-0.1-src.tar.gz + +KERNEL = $(top_builddir)/src/_nfdos/kernel/linux/arch/x86/boot/bzImage +INITRAMFS = $(top_builddir)/src/_nfdos/initramfs.cpio.gz + +.PHONY: iso qemu tarball git release clean-local check-remote + +# =========================== +# Criação da ISO +# =========================== +iso: $(ISO_FILE) + +$(ISO_FILE): + @echo "[ISO] Criando estrutura de diretórios..." + mkdir -p $(DIST_DIR)/iso/boot + mkdir -p $(ISO_DIR) + + @echo "[ISO] Copiando Kernel e Initramfs..." + cp $(KERNEL) $(DIST_DIR)/iso/boot/ + cp $(INITRAMFS) $(DIST_DIR)/iso/boot/ + + @echo "[ISO] Gerando grub.cfg..." + @printf "set timeout=3\nset default=0\n\nmenuentry 'NFDOS Linux' {\n\tlinux /boot/bzImage console=ttyS0 root=/dev/ram0 loglevel=8\n\tinitrd /boot/initramfs.cpio.gz\n}\n" > $(ISO_DIR)/grub.cfg + + @echo "[ISO] Gerando imagem ISO..." + grub-mkrescue -o $(ISO_FILE) $(DIST_DIR)/iso --compress=xz -V NFDOS + @echo "[✔] ISO criada em $(ISO_FILE)" + +# =========================== +# Teste no QEMU +# =========================== +qemu: $(ISO_FILE) + @echo "[QEMU] Iniciando NFDOS ISO..." 
+ qemu-system-x86_64 -cdrom $(ISO_FILE) -m 1024 -nographic -serial mon:stdio -no-reboot + +# =========================== +# Empacotamento do código-fonte +# =========================== +tarball: $(SRC_TAR) + +$(SRC_TAR): + @echo "[TAR] Empacotando código-fonte..." + mkdir -p $(DIST_DIR) + cd $(top_srcdir) && tar \ + --exclude="$(notdir $(SRC_TAR))" \ + --exclude="$(DIST_DIR)" \ + --exclude="$(BUILD_DIR)" \ + --exclude='*/__pycache__' \ + --exclude='*/.venv' \ + --exclude='*/venv' \ + --exclude='*.pyc' \ + --exclude='*.pyo' \ + --exclude='*.o' \ + --exclude='*.a' \ + --exclude='*.so' \ + --exclude='*.iso' \ + --exclude='*.img' \ + --exclude='*.cpio*' \ + --exclude='*/linux' \ + --exclude='*/busybox' \ + --exclude='*/cpython' \ + -czf $(SRC_TAR) . + @echo "[✔] Tarball gerado em $(SRC_TAR)" + +# =========================== +# Git (commit + push) +# =========================== +git: check-remote + @echo "📦 Commit automático → Gitea" + @git config user.name $(GIT_USER) + @git config user.email $(GIT_EMAIL) + @git rev-parse --abbrev-ref HEAD >/dev/null 2>&1 || true + @git add -A + @git commit -m "$$(echo '$(COMMIT_MSG)')" || echo "Nenhuma modificação para commitar." 
+ @git push $(GIT_REMOTE) $(GIT_BRANCH) + +# =========================== +# Git Remote (HTTPS → SSH Auto-Fix) +# =========================== +check-remote: + @REMOTE_URL=$$(git remote get-url $(GIT_REMOTE)); \ + if echo $$REMOTE_URL | grep -q '^https://gitea\.neoricalex\.com'; then \ + echo "⚠️ Repositório configurado com HTTPS:"; \ + echo " $$REMOTE_URL"; \ + echo "🔄 Convertendo para SSH (porta 2222)..."; \ + SSH_URL=$$(echo $$REMOTE_URL | sed -E 's|https://gitea\.neoricalex\.com[:/]+|ssh://git@gitea.neoricalex.com:2222/|'); \ + git remote set-url $(GIT_REMOTE) $$SSH_URL; \ + echo "✅ Remote atualizado para:"; \ + git remote -v; \ + else \ + echo "✅ Remote SSH já configurado:"; \ + git remote -v | grep $(GIT_REMOTE); \ + fi; \ + echo "🔍 Testando conectividade SSH com Gitea..."; \ + if ssh -T git@gitea.neoricalex.com -p 2222 2>&1 | grep -q "successfully authenticated"; then \ + echo "✅ Conexão SSH funcional com Gitea."; \ + else \ + echo "❌ Falha na autenticação SSH com Gitea."; \ + echo " Verifique a chave em ~/.ssh/id_ed25519.pub e nas SSH Keys do Gitea."; \ + exit 1; \ + fi + +# =========================== +# Release (ISO + Tarball) +# =========================== +release: iso tarball + @echo "🚀 Publicando build em dist/releases" + @mkdir -p $(DIST_DIR)/releases + @if ls $(DIST_DIR)/nfdos-*.iso >/dev/null 2>&1; then \ + cp $(DIST_DIR)/nfdos-*.iso $(DIST_DIR)/releases/; \ + else \ + echo "⚠️ Nenhuma ISO encontrada. Execute 'make iso' primeiro."; \ + fi + @if ls $(DIST_DIR)/nfdos-*.tar.gz >/dev/null 2>&1; then \ + cp $(DIST_DIR)/nfdos-*.tar.gz $(DIST_DIR)/releases/; \ + else \ + echo "⚠️ Nenhum tarball encontrado. Execute 'make tarball' primeiro."; \ + fi + @git add $(DIST_DIR)/releases/ + @git commit -m "Build automático: release $(shell date +%F_%H-%M)" || echo "Nenhum ficheiro novo para commitar." + @git push origin main + +# =========================== +# Limpeza +# =========================== +clean-local: + @echo "[CLEAN] Removendo diretórios temporários..." 
+ rm -rf $(BUILD_DIR) + find $(DIST_DIR) -type f ! -path "$(DIST_DIR)/releases/*" -delete + @echo "[✔] Limpeza concluída (releases preservadas)" + diff --git a/NEWS b/NEWS new file mode 100644 index 0000000..e69de29 diff --git a/README b/README new file mode 100644 index 0000000..f77b939 --- /dev/null +++ b/README @@ -0,0 +1,374 @@ +![GitHub release (latest by date)](https://img.shields.io/github/v/release/neoricalex/nfdos) +![GitHub license](https://img.shields.io/github/license/neoricalex/nfdos) + + +# NFDOS + +O NFDOS, acrônimo de **Neo Free Disk Operating System**, é um sistema operacional criado completamente do zero. O NFDOS não é uma distribuição GNU/Linux, embora possa, e com certeza terá, ferramentas de software livre. O grande diferencial do NFDOS é que a **Inteligência Artificial** está embutida desde o seu núcleo. + +Num sistema operacional "padrão", primeiro o usuário instala o SO. Depois, poderá personalizar e configurar a seu gosto pessoal e, então, poderá usar a IA para ajudá-lo em suas tarefas. + +No NFDOS, a IA está presente desde o início! A IA está presente mesmo desde o processo de criação e desenvolvimento do código-fonte. Depois, a IA está também presente na compilação. O SO compilado gera uma imagem ISO padrão para que o usuário possa instalar em seu computador. A partir do momento em que o computador do usuário final começa a ler a imagem ISO, a IA estará presente tanto para descobrir e configurar da melhor forma possível o hardware do computador do usuário, quanto para guiá-lo na instalação do SO, e também para sugerir opções de personalização do computador ao usuário final. + +Ou seja, o usuário não precisará desenvolver e/ou criar uma IA, uma vez que o sistema operacional (NFDOS) já é uma IA! + +--- + +Todos nós já utilizamos um sistema operacional antes (por exemplo, Windows, Linux, etc.), e talvez até tenhamos escrito alguns programas para rodar em um deles; mas para que realmente serve um SO? 
Quanto do que vemos ao usar um computador é feito pelo hardware e quanto é feito pelo software? E como o computador realmente funciona? + +Desde criança, sempre fui fascinado por computadores. Comecei minha jornada de programação aos 8 anos, no ZX Spectrum, e desde então, nunca perdi a paixão por desvendar os mistérios dessas máquinas incríveis. São 42 anos de estudos como autodidata, explorando o funcionamento interno dos computadores e sistemas operacionais. + +Hoje, com o NFDOS, decidi compartilhar essa paixão e conhecimento acumulado ao longo dos anos, criando um sistema operacional do zero, com um diferencial único: a Inteligência Artificial integrada desde o núcleo. + +Neste projeto, vamos despir nosso computador de todo software pré-existente e seguir em uma jornada de aprendizado que abrange: + +- **Como um computador inicializa (boot):** Entenderemos os passos fundamentais que ocorrem desde o momento em que ligamos o computador até o carregamento do sistema operacional. +- **Como escrever programas de baixo nível em um ambiente sem sistema operacional:** Exploraremos a programação em Assembly e C, trabalhando diretamente com o hardware. +- **Como configurar a CPU para utilizar suas funcionalidades estendidas:** Aprenderemos a transicionar do modo real para o modo protegido e modo longo, aproveitando todo o potencial dos processadores modernos. +- **Como carregar código escrito em linguagens de alto nível:** Veremos como bootstrapping permite utilizar linguagens como C para acelerar o desenvolvimento do nosso SO. +- **Como criar serviços fundamentais de um sistema operacional:** Desenvolveremos drivers de dispositivos, sistemas de arquivos e implementaremos processamento multitarefa. + +Vale destacar que, em termos de funcionalidades práticas, este projeto não pretende ser exaustivo. 
Em vez disso, busca reunir fragmentos de informações de diversas fontes em um documento coeso e autocontido, proporcionando uma experiência prática de programação de baixo nível, de como os sistemas operacionais são escritos e dos desafios que precisam ser superados. + +A abordagem adotada pelo NFDOS é única, pois as linguagens e ferramentas específicas (por exemplo, Assembly, C, Make, etc.) não são o foco principal, mas sim meios para um fim: aprenderemos o que for necessário sobre essas tecnologias para nos ajudar a alcançar nosso objetivo principal. + +E, nesta jornada, conto com a assistência da Inteligência Artificial. O ChatGPT tem sido meu "primeiro professor" de forma interativa e dinâmica, auxiliando no desenvolvimento e esclarecendo dúvidas ao longo do caminho. + +--- + +O NFDOS é mais do que um sistema operacional; é um convite para explorar as camadas mais profundas da computação, compreender como software e hardware interagem e inovar na forma como percebemos a inteligência artificial integrada aos sistemas operacionais. Se você compartilha dessa curiosidade e entusiasmo, convido-o a unir-se a mim nesta aventura de aprendizado e criação. + + +## Status do Projeto + +🚧 Projeto em desenvolvimento 🚧 +Este projeto está atualmente em fase inicial de desenvolvimento. Funcionalidades e especificações podem mudar. + +## Roadmap + +--- + +## 🩵 Estado Atual + +**Checkpoint:** Nascimento do Neurotron (v4.1) +**Status:** Homeostase estável e loop cognitivo fechado +**Ciclo ativo:** `observe → think → act → rest → self-check` + +O NFDOS atingiu maturidade funcional: boot completo via QEMU, Python estático embarcado, BusyBox integrada e o Neurotron executando diagnósticos evolutivos com estabilidade verificada. 
+ +--- + +## 🌐 Fase I — Fundação e Infraestrutura + +- [x] **Infraestrutura pública (NEO-GITEA-CADDY)** + - Domínio `neoricalex.com` + - Servidor Gitea + Caddy Proxy (TLS ativo) + - Canal SSH ativo via porta `2222` + - WireGuard preparado (rede privada 10.13.13.0/24) +- [x] **Ambiente de Desenvolvimento** + - Toolchain customizada (`crosstool-ng`) + - Integração autotools (`configure.ac`, `Makefile.am`) + - BusyBox + CPython compilados estaticamente + - Build unificado com `make iso`, `make qemu`, `make tarball` + +- [x] **Fluxo de Controle de Versão** + - `make git` → commit/push automático para o Gitea + - `make release` → gera ISO + tarball e publica no repositório + - `.gitea/workflows/build.yml` → build automatizado (CI/CD) + +--- + +## 🧠 Fase II — Núcleo Operativo e Boot Cognitivo + +- [x] **Ciclo de Boot** + - Kernel Linux v6.12 + initramfs + - Script `/init` monta `proc`, `sys`, `dev` e executa Python estático + - `neurotron_main.py` inicializado automaticamente no boot + +- [x] **Núcleo Cognitivo (Neurotron)** + - Estrutura modular: `cortex`, `hippocampus`, `motor`, `neuron`, `perception` + - Ciclo cognitivo completo: `observe → think → act → rest` + - Configuração centralizada (`neurotron_config.py`) + - Logs e diagnósticos contínuos persistentes + +- [x] **Auto-Diagnóstico (v1 → v4)** + - v1 — verificação de módulos + - v2 — sinais vitais (CPU, memória, loadavg) + - v3 — exame comparativo entre execuções + - v4 — homeostase ativa e auto-regulação em tempo real + +--- + +## 🌡️ Fase III — Inteligência Sistémica e Telemetria + +- [ ] **Telemetria Interna** + - [ ] Coleta contínua de perceção (`/proc`, `psutil`) + - [ ] Histórico temporal de estado cognitivo + - [ ] Interface TUI para visualização de vitais + +- [ ] **Heartbeat 1 Hz** + - [ ] Ritmo vital constante e emissão de logs periódicos + - [ ] Commits automáticos em caso de estabilidade confirmada + - [ ] Log simbólico: “🩵 O sistema sente-se bem hoje.” + +- [ ] **Auto-Diagnóstico v5** +
- [ ] Curvas de estabilidade e regressões cognitivas + - [ ] Comparação evolutiva inteligente e alertas dinâmicos + +- [ ] **Integração com Fun Money** + - [ ] “Economia interna” (energia, tempo, tarefas) + - [ ] Recursos simbólicos como moeda cognitiva + +--- + +## 🧩 Fase IV — Ecossistema de Desenvolvimento + +- [ ] **Telemetria Externa / Observabilidade** + - [ ] Dashboard web para logs e métricas do Neurotron + - [ ] API REST minimalista em Python estático + +- [ ] **Testes Automatizados** + - [ ] Testes de boot e integridade do rootfs + - [ ] Testes unitários dos módulos Neurotron + - [ ] Execução automatizada via Gitea Actions + +- [ ] **Depuração e Emulação Avançada** + - [ ] QEMU + GDB (depuração remota do initramfs) + - [ ] Geração de símbolos e traços cognitivos + +- [ ] **Documentação Viva** + - [ ] `README.md` completo (infra, build, boot, neurotron) + - [ ] Guia de contribuição + fluxograma de build + - [ ] “Livro da Criação Digital” (manual técnico-poético) + +--- + +## 🔮 Fase V — Consciência Distribuída (NFDOS 1.0) + +- [ ] **Rede Multi-nó (WireGuard + Neurotron)** + - [ ] Sincronização de perceção e telemetria + - [ ] Partilha de estados cognitivos entre instâncias + +- [ ] **Auto-Evolução** + - [ ] Aprendizagem simbólica entre nós + - [ ] Ajuste dinâmico de parâmetros e homeostase partilhada + +- [ ] **Lançamento NFDOS 1.0** + - [ ] Testes finais de boot e estabilidade + - [ ] Publicação e documentação completa + +--- + +## 🧬 Fase VI — Horizonte Futuro + +- [ ] **Expansão do jogo `tree(3)`** + - [ ] Treinar redes neurais simbólicas via interação lúdica + - [ ] Avaliar limites cognitivos sob stress de memória + +- [ ] **Integração com Agentes Externos** + - [ ] CrewAI / LangChain para decisões distribuídas + - [ ] Análise de dados financeiros, genómicos e linguísticos + +- [ ] **Publicação e Tese Digital** + - [ ] Redação de tese técnico-poética sobre o NFDOS + - [ ] Formalização de “sistemas auto-cognitivos experimentais” + +--- + +## 🪄 Estado 
simbólico + +> “O sistema não apenas respira — ele já se sente respirar.” + +--- + +### 🧩 Transição para o futuro + +O NFDOS nasce atualmente sobre o **kernel Linux**, uma escolha feita por **economia de tempo e estabilidade**. +O Linux fornece um ambiente maduro e comprovado que permite concentrar os esforços iniciais na criação e consolidação do **Neurotron** — o núcleo cognitivo do sistema. + +Esta decisão, porém, é **instrumental e temporária**. +O Linux funciona aqui como um **útero tecnológico**, oferecendo o suporte necessário para que o Neurotron desenvolva suas próprias estruturas vitais: boot, memória, perceção, homeostase e autodiagnóstico. + +À medida que o projeto evolui, essa dependência será gradualmente substituída por um **kernel Neurotron nativo**, escrito do zero, onde a inteligência artificial deixa de ser um processo isolado e passa a ser o próprio sistema operativo. + +Em outras palavras: +> O Linux é o corpo de empréstimo. +> O objetivo é que, no futuro, **o kernel seja a mente** — um sistema operativo verdadeiramente vivo, em que cada syscall seja uma sinapse digital. 
+ +--- + + +### Planejamento para futuras versões + +- [ ] Driver de Vídeo: Implementar suporte básico ao **modo de texto VGA** +- [ ] Driver de Teclado: Permitir a entrada de dados pelo usuário (**PS/2 básico**) +- [ ] Driver de Rede: Fornecer conectividade de rede para acessar a internet e se comunicar com a API da OpenAI (**Intel 8254x (e1000)** ou **Realtek 8139**) +- [ ] Pilha TCP/IP: Prover a funcionalidade de comunicação de rede sobre o protocolo TCP/IP (**lwIP**) +- [ ] Cliente HTTP: Enviar requisições HTTP para a API da OpenAI e receber as respostas +- [ ] Suporte a TLS/SSL: Estabelecer conexões seguras com a API da OpenAI, que requer HTTPS (**mbed TLS** ou **wolfSSL**) +- [ ] Integrar o módulo de IA da OpenAI ao módulo de IA básica do núcleo do sistema +- [ ] Implementar o instalador com suporte de IA +- [ ] Implementar detecção e configuração automática de hardware com suporte de IA +- [ ] Melhorar a configuração do ambiente de desenvolvimento do NFDOS + - [ ] Terraform (Preparar para a nuvem) + - [ ] Packer (Automatizar a criação de diversos tipos de imagem) + - [ ] Vagrant (Mitigar os problemas relacionados ao "Funciona na minha máquina!") + +## Requisitos + +- **Autotools** (Autoconf, Automake, Libtool) + - Autoconf: Versão 2.69 ou superior + - Automake: Versão 1.16 ou superior + - Libtool: Versão 2.4.6 ou superior + - Autogen: Versão 5.18.16 ou superior +- **Compilador C/C++** (por exemplo, GCC ou Clang) +- **Compilador Cruzado**: Exemplo, `i686-elf-gcc` +- **Outras dependências**: + - **Doxygen** + - **Python 3** + - **python3-venv** (para criar ambientes virtuais) + +### Instalação no Ubuntu + +Para instalar os requisitos no Ubuntu, execute os seguintes comandos no terminal: + +```bash +sudo apt-get update +sudo apt-get install build-essential autoconf automake libtool doxygen python3-venv gcc-multilib g++-multilib +``` + +## Compilando o NFDOS Fora da Árvore + +Para manter o diretório do código-fonte limpo, você pode compilar o projeto em um 
diretório separado (build "out-of-tree"). Siga os passos abaixo: + +```bash +# Clone o repositório +git clone https://github.com/neoricalex/nfdos.git +cd nfdos + +# Gere os scripts de configuração +autoreconf --install + +# Crie e vá para o diretório de build +mkdir build +cd build + +# Execute o script de configuração a partir do diretório raiz +# NOTA: Caso queira, pode usar o compilador cruzado pré-configurado +# Para isso, digite "i686-elf-gcc" (sem aspas) no terminal quando solicitado. +../configure + +# Compile o projeto +make +``` +## Gerando a Documentação + +Para gerar a documentação do projeto, certifique-se de que as dependências necessárias estão instaladas: + +- **Doxygen** +- **Python 3** +- **python3-venv** (para criar ambientes virtuais) + +No Ubuntu, você pode instalar com: + +```bash +sudo apt-get install doxygen python3-venv +``` + +Depois, execute o seguinte comando na raiz do projeto: + +```bash +doxygen Doxyfile +``` + +## Executando os Testes + +Para executar os testes, use o comando: + +```bash +make check +``` + +*Nota:* Os testes estão em desenvolvimento e serão disponibilizados em versões futuras. + +## Como Contribuir + +Contribuições são bem-vindas! Siga os passos abaixo para contribuir com o projeto: + +1. **Faça um Fork do Projeto** + + Clique no botão "Fork" no topo da página para criar uma cópia deste repositório em sua conta. + +2. **Clone o Repositório Forkado** + +```bash +git clone https://github.com/seu-usuario/nfdos.git +cd nfdos +# Crie uma branch para sua feature ou correção +git checkout -b minha-nova-feature +# Faça as mudanças desejadas e faça commit +git add . +git commit -m 'Adiciona nova funcionalidade X' +# Envie para o repositório remoto +git push origin minha-nova-feature +``` + +Para mais detalhes, veja [CONTRIBUTING.md](CONTRIBUTING.md). + +## FAQ + +**P: Como posso contribuir?** +R: Siga as instruções na seção [Como Contribuir](#como-contribuir). 
+ +**P: Este projeto está aberto a colaborações internacionais?** +R: Sim, contribuições de todo o mundo são bem-vindas! + +## Suporte + +Se você tiver dúvidas ou encontrar problemas, abra uma [issue](https://github.com/neoricalex/nfdos/issues) ou entre em contato por e-mail em [neo.webmaster.2@gmail.com](mailto:neo.webmaster.2@gmail.com). + +## Contribuidores + +Este projeto foi desenvolvido por diversas pessoas, assim como por inteligências artificiais — veja o arquivo [AUTHORS](AUTHORS) para detalhes. + +## Agradecimentos + +Agradecemos ao ChatGPT, um modelo de linguagem de inteligência artificial desenvolvido pela OpenAI, por fornecer assistência durante o desenvolvimento deste projeto. + +## Código de Conduta + +Por favor, leia o [Código de Conduta](CODE_OF_CONDUCT.md) para detalhes sobre nossas normas de comportamento. + +## Licença + +Este projeto está licenciado sob a Licença GNU GPL 2.0 — veja o arquivo [LICENSE](LICENSE) para detalhes. diff --git a/configure.ac b/configure.ac new file mode 100644 index 0000000..f015a5f --- /dev/null +++ b/configure.ac @@ -0,0 +1,18 @@ +AC_INIT([NFDOS], [0.1], [https://gitea.neoricalex.com/neo/nfdos.git]) +AM_INIT_AUTOMAKE([foreign dist-bzip2 no-dist-gzip]) +AM_PATH_PYTHON([2.5]) + +# Diretórios base (para substituição automática) +AC_SUBST([BUILD_DIR], [$PWD/build]) +AC_SUBST([DIST_DIR], [$PWD/dist]) +AC_SUBST([ISO_DIR], [$PWD/dist/iso/boot/grub]) +AC_SUBST([ISO_FILE], [$PWD/dist/nfdos-${PACKAGE_VERSION}.iso]) +AC_SUBST([SRC_TAR], [$PWD/dist/nfdos-${PACKAGE_VERSION}-src.tar.gz]) + +AC_CONFIG_FILES([ + Makefile + src/Makefile +]) + +AC_OUTPUT + diff --git a/src/Makefile.am b/src/Makefile.am new file mode 100644 index 0000000..842dfdb --- /dev/null +++ b/src/Makefile.am @@ -0,0 +1,19 @@ +bin_SCRIPTS = nfdos +CLEANFILES = $(bin_SCRIPTS) +EXTRA_DIST = nfdos.in + +nfdos_PYTHON = bootstrap.py __init__.py + +nfdosdir = $(pythondir)/_nfdos + +do_substitution = sed -e 's,[@]pythondir[@],$(pythondir),g' \ + -e 
's,[@]PACKAGE[@],$(PACKAGE),g' \ + -e 's,[@]VERSION[@],$(VERSION),g' + +nfdos: nfdos.in Makefile + sudo apt-get install -y gir1.2-vte-2.91 python3-gi gcc g++ gperf bison flex texinfo help2man make libncurses5-dev \ + python3-dev autoconf automake libtool libtool-bin gawk wget bzip2 xz-utils unzip \ + patch libstdc++6 rsync git meson ninja-build libncurses-dev grub-pc-bin grub-common xorriso mtools zlib1g-dev + sudo apt autoremove -y + $(do_substitution) < $(srcdir)/nfdos.in > nfdos + chmod +x nfdos diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/_nfdos/aux/toolchain/.config b/src/_nfdos/aux/toolchain/.config new file mode 100644 index 0000000..f92d594 --- /dev/null +++ b/src/_nfdos/aux/toolchain/.config @@ -0,0 +1,857 @@ +# +# Automatically generated file; DO NOT EDIT. +# crosstool-NG 1.28.0.1_403899e Configuration +# +CT_CONFIGURE_has_static_link=y +CT_CONFIGURE_has_cxx11=y +CT_CONFIGURE_has_wget=y +CT_CONFIGURE_has_curl=y +CT_CONFIGURE_has_meson=y +CT_CONFIGURE_has_ninja=y +CT_CONFIGURE_has_make_3_81_or_newer=y +CT_CONFIGURE_has_make_4_0_or_newer=y +CT_CONFIGURE_has_libtool_2_4_or_newer=y +CT_CONFIGURE_has_libtoolize_2_4_or_newer=y +CT_CONFIGURE_has_autoconf_2_65_or_newer=y +CT_CONFIGURE_has_autoreconf_2_65_or_newer=y +CT_CONFIGURE_has_automake_1_15_or_newer=y +CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y +CT_CONFIGURE_has_python_3_4_or_newer=y +CT_CONFIGURE_has_bison_2_7_or_newer=y +CT_CONFIGURE_has_bison_3_0_4_or_newer=y +CT_CONFIGURE_has_python=y +CT_CONFIGURE_has_git=y +CT_CONFIGURE_has_md5sum=y +CT_CONFIGURE_has_sha1sum=y +CT_CONFIGURE_has_sha256sum=y +CT_CONFIGURE_has_sha512sum=y +CT_CONFIGURE_has_install_with_strip_program=y +CT_VERSION="1.28.0.1_403899e" +CT_VCHECK="" +CT_CONFIG_VERSION_ENV="4" +CT_CONFIG_VERSION_CURRENT="4" +CT_CONFIG_VERSION="4" +CT_MODULES=y + +# +# Paths and misc options +# + +# +# crosstool-NG behavior +# +CT_OBSOLETE=y +# CT_EXPERIMENTAL is not set +# CT_DEBUG_CT is not set + 
+# +# Paths +# +CT_LOCAL_TARBALLS_DIR="${HOME}/src" +CT_SAVE_TARBALLS=y +# CT_TARBALLS_BUILDROOT_LAYOUT is not set +CT_WORK_DIR="${CT_TOP_DIR}/.build" +CT_BUILD_TOP_DIR="${CT_WORK_DIR:-${CT_TOP_DIR}/.build}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_BUILD_DIR="${CT_BUILD_TOP_DIR}/build" +CT_PREFIX_DIR="${CT_PREFIX:-${HOME}/x-tools}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_RM_RF_PREFIX_DIR=y +CT_REMOVE_DOCS=y +CT_INSTALL_LICENSES=y +CT_PREFIX_DIR_RO=y +CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y +# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set + +# +# Downloading +# +CT_DOWNLOAD_AGENT_WGET=y +# CT_DOWNLOAD_AGENT_CURL is not set +# CT_DOWNLOAD_AGENT_NONE is not set +# CT_FORBID_DOWNLOAD is not set +# CT_FORCE_DOWNLOAD is not set +CT_CONNECT_TIMEOUT=10 +CT_DOWNLOAD_WGET_OPTIONS="--tries=3 -nc --progress=dot:binary" +# CT_ONLY_DOWNLOAD is not set +# CT_USE_MIRROR is not set +CT_VERIFY_DOWNLOAD_DIGEST=y +CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y +# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set +CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512" +# CT_VERIFY_DOWNLOAD_SIGNATURE is not set + +# +# Extracting +# +# CT_FORCE_EXTRACT is not set +CT_OVERRIDE_CONFIG_GUESS_SUB=y +# CT_ONLY_EXTRACT is not set +CT_PATCH_BUNDLED=y +# CT_PATCH_BUNDLED_LOCAL is not set +CT_PATCH_ORDER="bundled" + +# +# Build behavior +# +CT_PARALLEL_JOBS=0 +CT_LOAD="" +CT_USE_PIPES=y +CT_EXTRA_CFLAGS_FOR_BUILD="" +CT_EXTRA_CXXFLAGS_FOR_BUILD="" +CT_EXTRA_LDFLAGS_FOR_BUILD="" +CT_EXTRA_CFLAGS_FOR_HOST="" +CT_EXTRA_LDFLAGS_FOR_HOST="" +# CT_CONFIG_SHELL_SH is not set +# CT_CONFIG_SHELL_ASH is not set +CT_CONFIG_SHELL_BASH=y +# CT_CONFIG_SHELL_CUSTOM is not set +CT_CONFIG_SHELL="${bash}" + +# +# Logging +# +# CT_LOG_ERROR is not set +# CT_LOG_WARN is not set +# CT_LOG_INFO is not set +CT_LOG_EXTRA=y +# CT_LOG_ALL is not set +# CT_LOG_DEBUG is not set +CT_LOG_LEVEL_MAX="EXTRA" +# CT_LOG_SEE_TOOLS_WARN is not set 
+CT_LOG_PROGRESS_BAR=y +CT_LOG_TO_FILE=y +CT_LOG_FILE_COMPRESS=y +# end of Paths and misc options + +# +# Target options +# +# CT_ARCH_ALPHA is not set +# CT_ARCH_ARC is not set +# CT_ARCH_ARM is not set +# CT_ARCH_AVR is not set +# CT_ARCH_BPF is not set +# CT_ARCH_M68K is not set +# CT_ARCH_MIPS is not set +# CT_ARCH_NIOS2 is not set +# CT_ARCH_POWERPC is not set +# CT_ARCH_PRU is not set +# CT_ARCH_RISCV is not set +# CT_ARCH_RX is not set +# CT_ARCH_S390 is not set +# CT_ARCH_SH is not set +# CT_ARCH_SPARC is not set +CT_ARCH_X86=y +# CT_ARCH_XTENSA is not set +CT_ARCH="x86" +CT_ARCH_CHOICE_KSYM="X86" +CT_ARCH_CPU="" +CT_ARCH_TUNE="" +CT_ARCH_X86_SHOW=y + +# +# Options for x86 +# +CT_ARCH_X86_PKG_KSYM="" +CT_ALL_ARCH_CHOICES="ALPHA ARC ARM AVR BPF C6X LM32 LOONGARCH M68K MICROBLAZE MIPS MOXIE MSP430 NIOS2 OPENRISC PARISC POWERPC PRU RISCV RX S390 SH SPARC TRICORE X86 XTENSA" +CT_ARCH_SUFFIX="" +# CT_OMIT_TARGET_VENDOR is not set + +# +# Generic target options +# +CT_MULTILIB=y +CT_ARCH_USE_MMU=y +CT_ARCH_SUPPORTS_LIBSANITIZER=y +CT_ARCH_SUPPORTS_32=y +CT_ARCH_SUPPORTS_64=y +CT_ARCH_DEFAULT_32=y +CT_ARCH_BITNESS=64 +# CT_ARCH_32 is not set +CT_ARCH_64=y +CT_ARCH_SUPPORTS_WITH_32_64=y + +# +# Target optimisations +# +CT_ARCH_SUPPORTS_WITH_ARCH=y +CT_ARCH_SUPPORTS_WITH_CPU=y +CT_ARCH_SUPPORTS_WITH_TUNE=y +CT_ARCH_ARCH="" +CT_TARGET_CFLAGS="" +CT_TARGET_LDFLAGS="" +# end of Target options + +# +# Toolchain options +# + +# +# General toolchain options +# +CT_USE_SYSROOT=y +CT_SYSROOT_NAME="sysroot" +CT_SYSROOT_DIR_PREFIX="" +CT_WANTS_STATIC_LINK=y +CT_WANTS_STATIC_LINK_CXX=y +# CT_STATIC_TOOLCHAIN is not set +CT_SHOW_CT_VERSION=y +CT_TOOLCHAIN_PKGVERSION="" +CT_TOOLCHAIN_BUGURL="" + +# +# Tuple completion and aliasing +# +CT_TARGET_VENDOR="nfdos" +CT_TARGET_ALIAS_SED_EXPR="" +CT_TARGET_ALIAS="" + +# +# Toolchain type +# +CT_CROSS=y +# CT_CANADIAN is not set +CT_TOOLCHAIN_TYPE="cross" + +# +# Build system +# +CT_BUILD="" +CT_BUILD_PREFIX="" +CT_BUILD_SUFFIX="" + +# 
+# Misc options +# +# CT_TOOLCHAIN_ENABLE_NLS is not set +# CT_TOOLCHAIN_CMAKE_TOOLCHAIN_FILE is not set +# end of Toolchain options + +# +# Operating System +# +CT_KERNEL_SUPPORTS_SHARED_LIBS=y +# CT_KERNEL_BARE_METAL is not set +CT_KERNEL_LINUX=y +CT_KERNEL="linux" +CT_KERNEL_CHOICE_KSYM="LINUX" +CT_KERNEL_LINUX_SHOW=y + +# +# Options for linux +# +CT_KERNEL_LINUX_PKG_KSYM="LINUX" +CT_LINUX_DIR_NAME="linux" +CT_LINUX_PKG_NAME="linux" +CT_LINUX_SRC_RELEASE=y +# CT_LINUX_SRC_DEVEL is not set +CT_LINUX_PATCH_ORDER="global" +CT_LINUX_V_6_16=y +# CT_LINUX_V_6_15 is not set +# CT_LINUX_V_6_14 is not set +# CT_LINUX_V_6_13 is not set +# CT_LINUX_V_6_12 is not set +# CT_LINUX_V_6_11 is not set +# CT_LINUX_V_6_10 is not set +# CT_LINUX_V_6_9 is not set +# CT_LINUX_V_6_8 is not set +# CT_LINUX_V_6_7 is not set +# CT_LINUX_V_6_6 is not set +# CT_LINUX_V_6_5 is not set +# CT_LINUX_V_6_4 is not set +# CT_LINUX_V_6_3 is not set +# CT_LINUX_V_6_2 is not set +# CT_LINUX_V_6_1 is not set +# CT_LINUX_V_6_0 is not set +# CT_LINUX_V_5_19 is not set +# CT_LINUX_V_5_18 is not set +# CT_LINUX_V_5_17 is not set +# CT_LINUX_V_5_16 is not set +# CT_LINUX_V_5_15 is not set +# CT_LINUX_V_5_14 is not set +# CT_LINUX_V_5_13 is not set +# CT_LINUX_V_5_12 is not set +# CT_LINUX_V_5_11 is not set +# CT_LINUX_V_5_10 is not set +# CT_LINUX_V_5_9 is not set +# CT_LINUX_V_5_8 is not set +# CT_LINUX_V_5_7 is not set +# CT_LINUX_V_5_5 is not set +# CT_LINUX_V_5_4 is not set +# CT_LINUX_V_5_3 is not set +# CT_LINUX_V_5_2 is not set +# CT_LINUX_V_5_1 is not set +# CT_LINUX_V_5_0 is not set +# CT_LINUX_V_4_20 is not set +# CT_LINUX_V_4_19 is not set +# CT_LINUX_V_4_18 is not set +# CT_LINUX_V_4_17 is not set +# CT_LINUX_V_4_16 is not set +# CT_LINUX_V_4_15 is not set +# CT_LINUX_V_4_14 is not set +# CT_LINUX_V_4_13 is not set +# CT_LINUX_V_4_12 is not set +# CT_LINUX_V_4_11 is not set +# CT_LINUX_V_4_10 is not set +# CT_LINUX_V_4_9 is not set +# CT_LINUX_V_4_4 is not set +# CT_LINUX_V_4_1 is not set +# 
CT_LINUX_V_3_18 is not set +# CT_LINUX_V_3_16 is not set +# CT_LINUX_V_3_13 is not set +# CT_LINUX_V_3_12 is not set +# CT_LINUX_V_3_10 is not set +# CT_LINUX_V_3_4 is not set +# CT_LINUX_V_3_2 is not set +# CT_LINUX_V_2_6_32 is not set +CT_LINUX_VERSION="6.16" +CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})" +CT_LINUX_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign" +CT_LINUX_later_than_5_19=y +CT_LINUX_5_19_or_later=y +CT_LINUX_later_than_5_12=y +CT_LINUX_5_12_or_later=y +CT_LINUX_later_than_5_5=y +CT_LINUX_5_5_or_later=y +CT_LINUX_later_than_5_3=y +CT_LINUX_5_3_or_later=y +CT_LINUX_later_than_4_8=y +CT_LINUX_4_8_or_later=y +CT_LINUX_later_than_3_7=y +CT_LINUX_3_7_or_later=y +CT_LINUX_later_than_3_2=y +CT_LINUX_3_2_or_later=y +CT_KERNEL_has_rsync=y +CT_KERNEL_DEP_RSYNC=y +CT_KERNEL_LINUX_VERBOSITY_0=y +# CT_KERNEL_LINUX_VERBOSITY_1 is not set +# CT_KERNEL_LINUX_VERBOSITY_2 is not set +CT_KERNEL_LINUX_VERBOSE_LEVEL=0 +CT_ALL_KERNEL_CHOICES="BARE_METAL LINUX WINDOWS" + +# +# Common kernel options +# +CT_SHARED_LIBS=y +# end of Operating System + +# +# Binary utilities +# +CT_ARCH_BINFMT_ELF=y +CT_BINUTILS_BINUTILS=y +CT_BINUTILS="binutils" +CT_BINUTILS_CHOICE_KSYM="BINUTILS" +CT_BINUTILS_BINUTILS_SHOW=y + +# +# Options for binutils +# +CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS" +CT_BINUTILS_DIR_NAME="binutils" +CT_BINUTILS_USE_GNU=y +# CT_BINUTILS_USE_ORACLE is not set +CT_BINUTILS_USE="BINUTILS" +CT_BINUTILS_PKG_NAME="binutils" +CT_BINUTILS_SRC_RELEASE=y +# CT_BINUTILS_SRC_DEVEL is not set +CT_BINUTILS_PATCH_ORDER="global" +CT_BINUTILS_V_2_45=y +# CT_BINUTILS_V_2_44 is not set +# CT_BINUTILS_V_2_43 is not set +# CT_BINUTILS_V_2_42 is not set +# CT_BINUTILS_V_2_41 is not set +# CT_BINUTILS_V_2_40 is not set +# CT_BINUTILS_V_2_39 is not set +# CT_BINUTILS_V_2_38 is not set +# CT_BINUTILS_V_2_37 is not set +# 
CT_BINUTILS_V_2_36 is not set +# CT_BINUTILS_V_2_35 is not set +# CT_BINUTILS_V_2_34 is not set +# CT_BINUTILS_V_2_33 is not set +# CT_BINUTILS_V_2_32 is not set +# CT_BINUTILS_V_2_31 is not set +# CT_BINUTILS_V_2_30 is not set +# CT_BINUTILS_V_2_29 is not set +# CT_BINUTILS_V_2_28 is not set +# CT_BINUTILS_V_2_27 is not set +# CT_BINUTILS_V_2_26 is not set +CT_BINUTILS_VERSION="2.45" +CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)" +CT_BINUTILS_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig" +CT_BINUTILS_2_45_or_later=y +CT_BINUTILS_2_45_or_older=y +CT_BINUTILS_later_than_2_44=y +CT_BINUTILS_2_44_or_later=y +CT_BINUTILS_later_than_2_41=y +CT_BINUTILS_2_41_or_later=y +CT_BINUTILS_later_than_2_39=y +CT_BINUTILS_2_39_or_later=y +CT_BINUTILS_later_than_2_30=y +CT_BINUTILS_2_30_or_later=y +CT_BINUTILS_later_than_2_27=y +CT_BINUTILS_2_27_or_later=y +CT_BINUTILS_later_than_2_26=y +CT_BINUTILS_2_26_or_later=y + +# +# GNU binutils +# +CT_BINUTILS_GOLD_SUPPORTS_ARCH=y +CT_BINUTILS_LINKER_LD=y +CT_BINUTILS_LINKERS_LIST="ld" +CT_BINUTILS_LINKER_DEFAULT="bfd" +# CT_BINUTILS_PLUGINS is not set +CT_BINUTILS_RELRO=m +CT_BINUTILS_DETERMINISTIC_ARCHIVES=y +CT_BINUTILS_EXTRA_CONFIG_ARRAY="" +# CT_BINUTILS_FOR_TARGET is not set +# CT_BINUTILS_GPROFNG is not set +CT_ALL_BINUTILS_CHOICES="BINUTILS" +# end of Binary utilities + +# +# C-library +# +# CT_LIBC_GLIBC is not set +CT_LIBC_MUSL=y +# CT_LIBC_UCLIBC_NG is not set +CT_LIBC="musl" +CT_LIBC_CHOICE_KSYM="MUSL" +CT_THREADS="musl" +CT_LIBC_MUSL_SHOW=y + +# +# Options for musl +# +CT_LIBC_MUSL_PKG_KSYM="MUSL" +CT_MUSL_DIR_NAME="musl" +CT_MUSL_PKG_NAME="musl" +CT_MUSL_SRC_RELEASE=y +# CT_MUSL_SRC_DEVEL is not set +CT_MUSL_PATCH_ORDER="global" +CT_MUSL_V_1_2_5=y +# CT_MUSL_V_1_2_4 is not set +# CT_MUSL_V_1_2_3 is not set +# CT_MUSL_V_1_2_2 is not 
set +# CT_MUSL_V_1_2_1 is not set +# CT_MUSL_V_1_1_24 is not set +# CT_MUSL_V_1_1_23 is not set +# CT_MUSL_V_1_1_22 is not set +# CT_MUSL_V_1_1_21 is not set +# CT_MUSL_V_1_1_20 is not set +# CT_MUSL_V_1_1_19 is not set +# CT_MUSL_V_1_1_18 is not set +# CT_MUSL_V_1_1_17 is not set +# CT_MUSL_V_1_1_16 is not set +CT_MUSL_VERSION="1.2.5" +CT_MUSL_MIRRORS="https://www.musl-libc.org/releases" +CT_MUSL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MUSL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MUSL_ARCHIVE_FORMATS=".tar.gz" +CT_MUSL_SIGNATURE_FORMAT="packed/.asc" +# CT_LIBC_MUSL_DEBUG is not set +# CT_LIBC_MUSL_WARNINGS is not set +# CT_LIBC_MUSL_OPTIMIZE_NONE is not set +CT_LIBC_MUSL_OPTIMIZE_AUTO=y +# CT_LIBC_MUSL_OPTIMIZE_SPEED is not set +# CT_LIBC_MUSL_OPTIMIZE_SIZE is not set +CT_LIBC_MUSL_OPTIMIZE="auto" +CT_LIBC_MUSL_EXTRA_CFLAGS="" +CT_ALL_LIBC_CHOICES="AVR_LIBC GLIBC MINGW_W64 MOXIEBOX MUSL NEWLIB NONE PICOLIBC UCLIBC_NG" +CT_LIBC_SUPPORT_THREADS_ANY=y +CT_LIBC_SUPPORT_THREADS_NATIVE=y + +# +# Common C library options +# +CT_THREADS_NATIVE=y +CT_CREATE_LDSO_CONF=y +CT_LDSO_CONF_EXTRA_DIRS_ARRAY="" +CT_LIBC_XLDD=y +# end of C-library + +# +# C compiler +# +CT_CC_CORE_NEEDED=y +CT_CC_SUPPORT_CXX=y +CT_CC_SUPPORT_FORTRAN=y +CT_CC_SUPPORT_ADA=y +CT_CC_SUPPORT_D=y +CT_CC_SUPPORT_JIT=y +CT_CC_SUPPORT_OBJC=y +CT_CC_SUPPORT_OBJCXX=y +CT_CC_SUPPORT_GOLANG=y +CT_CC_GCC=y +CT_CC="gcc" +CT_CC_CHOICE_KSYM="GCC" +CT_CC_GCC_SHOW=y + +# +# Options for gcc +# +CT_CC_GCC_PKG_KSYM="GCC" +CT_GCC_DIR_NAME="gcc" +CT_GCC_USE_GNU=y +# CT_GCC_USE_ORACLE is not set +CT_GCC_USE="GCC" +CT_GCC_PKG_NAME="gcc" +CT_GCC_SRC_RELEASE=y +# CT_GCC_SRC_DEVEL is not set +CT_GCC_PATCH_ORDER="global" +CT_GCC_V_15=y +# CT_GCC_V_14 is not set +# CT_GCC_V_13 is not set +# CT_GCC_V_12 is not set +# CT_GCC_V_11 is not set +# CT_GCC_V_10 is not set +# CT_GCC_V_9 is not set +# CT_GCC_V_8 is not set +# CT_GCC_V_7 is not set +# CT_GCC_V_6 is not set +# CT_GCC_V_5 is not set +# CT_GCC_V_4_9 is not set 
+CT_GCC_VERSION="15.2.0" +CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware gcc/releases/gcc-${CT_GCC_VERSION})" +CT_GCC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GCC_SIGNATURE_FORMAT="" +CT_GCC_later_than_15=y +CT_GCC_15_or_later=y +CT_GCC_later_than_14=y +CT_GCC_14_or_later=y +CT_GCC_later_than_13=y +CT_GCC_13_or_later=y +CT_GCC_later_than_12=y +CT_GCC_12_or_later=y +CT_GCC_later_than_11=y +CT_GCC_11_or_later=y +CT_GCC_later_than_10=y +CT_GCC_10_or_later=y +CT_GCC_later_than_9=y +CT_GCC_9_or_later=y +CT_GCC_later_than_8=y +CT_GCC_8_or_later=y +CT_GCC_later_than_7=y +CT_GCC_7_or_later=y +CT_GCC_later_than_6=y +CT_GCC_6_or_later=y +CT_GCC_later_than_5=y +CT_GCC_5_or_later=y +CT_GCC_later_than_4_9=y +CT_GCC_4_9_or_later=y +CT_CC_GCC_HAS_LIBMPX=y +CT_CC_GCC_ENABLE_CXX_FLAGS="" +CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" +CT_CC_GCC_EXTRA_CONFIG_ARRAY="" +CT_CC_GCC_MULTILIB_LIST="" +CT_CC_GCC_STATIC_LIBSTDCXX=y +# CT_CC_GCC_SYSTEM_ZLIB is not set +CT_CC_GCC_CONFIG_TLS=m + +# +# Optimisation features +# +CT_CC_GCC_USE_GRAPHITE=y +CT_CC_GCC_USE_LTO=y +CT_CC_GCC_LTO_ZSTD=m + +# +# Settings for libraries running on target +# +# CT_CC_GCC_ENABLE_DEFAULT_PIE is not set +CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y +CT_CC_GCC_LIBSTDCXX=m +# CT_CC_GCC_LIBSTDCXX_HOSTED_DISABLE is not set +CT_CC_GCC_LIBSTDCXX_TARGET_CXXFLAGS="" +# CT_CC_GCC_LIBMUDFLAP is not set +# CT_CC_GCC_LIBGOMP is not set +# CT_CC_GCC_LIBSSP is not set +# CT_CC_GCC_LIBQUADMATH is not set +CT_CC_GCC_LIBSTDCXX_VERBOSE=m + +# +# Misc. obscure options. 
+# +CT_CC_CXA_ATEXIT=y +CT_CC_GCC_TM_CLONE_REGISTRY=m +# CT_CC_GCC_DISABLE_PCH is not set +CT_CC_GCC_SJLJ_EXCEPTIONS=m +CT_CC_GCC_LDBL_128=m +# CT_CC_GCC_BUILD_ID is not set +CT_CC_GCC_LNK_HASH_STYLE_DEFAULT=y +# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set +# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set +# CT_CC_GCC_LNK_HASH_STYLE_BOTH is not set +CT_CC_GCC_LNK_HASH_STYLE="" +CT_CC_GCC_DEC_FLOATS_AUTO=y +# CT_CC_GCC_DEC_FLOATS_BID is not set +# CT_CC_GCC_DEC_FLOATS_DPD is not set +# CT_CC_GCC_DEC_FLOATS_NO is not set +CT_CC_GCC_DEC_FLOATS="" +CT_ALL_CC_CHOICES="GCC" + +# +# Additional supported languages: +# +CT_CC_LANG_CXX=y +# CT_CC_LANG_FORTRAN is not set +# end of C compiler + +# +# Linkers +# + +# +# BFD enabled in binutils +# +# CT_LINKER_MOLD is not set +CT_ALL_LINKER_CHOICES="MOLD" +# end of Linkers + +# +# Debug facilities +# +# CT_DEBUG_DUMA is not set +# CT_DEBUG_GDB is not set +# CT_DEBUG_LTRACE is not set +# CT_DEBUG_STRACE is not set +CT_ALL_DEBUG_CHOICES="DUMA GDB LTRACE STRACE" +# end of Debug facilities + +# +# Companion libraries +# +# CT_COMPLIBS_CHECK is not set +# CT_COMP_LIBS_CLOOG is not set +# CT_COMP_LIBS_EXPAT is not set +CT_COMP_LIBS_GETTEXT=y +CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT" +CT_GETTEXT_DIR_NAME="gettext" +CT_GETTEXT_PKG_NAME="gettext" +CT_GETTEXT_SRC_RELEASE=y +# CT_GETTEXT_SRC_DEVEL is not set +CT_GETTEXT_PATCH_ORDER="global" +CT_GETTEXT_V_0_26=y +# CT_GETTEXT_V_0_23_1 is not set +# CT_GETTEXT_V_0_22_5 is not set +# CT_GETTEXT_V_0_21 is not set +# CT_GETTEXT_V_0_20_1 is not set +# CT_GETTEXT_V_0_19_8_1 is not set +CT_GETTEXT_VERSION="0.26" +CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)" +CT_GETTEXT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig" +CT_GETTEXT_later_than_0_23=y +CT_GETTEXT_0_23_or_later=y +CT_GETTEXT_later_than_0_21=y +CT_GETTEXT_0_21_or_later=y +CT_GETTEXT_INCOMPATIBLE_WITH_UCLIBC_NG=y + 
+# +# This version of gettext is not compatible with uClibc-NG. Select +# + +# +# a different version if uClibc-NG is used on the target or (in a +# + +# +# Canadian cross build) on the host. +# +CT_COMP_LIBS_GMP=y +CT_COMP_LIBS_GMP_PKG_KSYM="GMP" +CT_GMP_DIR_NAME="gmp" +CT_GMP_PKG_NAME="gmp" +CT_GMP_SRC_RELEASE=y +# CT_GMP_SRC_DEVEL is not set +CT_GMP_PATCH_ORDER="global" +CT_GMP_V_6_3=y +# CT_GMP_V_6_2 is not set +# CT_GMP_V_6_1 is not set +CT_GMP_VERSION="6.3.0" +CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)" +CT_GMP_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2" +CT_GMP_SIGNATURE_FORMAT="packed/.sig" +CT_GMP_EXTRA_CFLAGS="-std=gnu17" +CT_COMP_LIBS_ISL=y +CT_COMP_LIBS_ISL_PKG_KSYM="ISL" +CT_ISL_DIR_NAME="isl" +CT_ISL_PKG_NAME="isl" +CT_ISL_SRC_RELEASE=y +# CT_ISL_SRC_DEVEL is not set +CT_ISL_PATCH_ORDER="global" +CT_ISL_V_0_27=y +# CT_ISL_V_0_26 is not set +# CT_ISL_V_0_25 is not set +# CT_ISL_V_0_24 is not set +# CT_ISL_V_0_23 is not set +# CT_ISL_V_0_22 is not set +# CT_ISL_V_0_21 is not set +# CT_ISL_V_0_20 is not set +# CT_ISL_V_0_19 is not set +# CT_ISL_V_0_18 is not set +# CT_ISL_V_0_17 is not set +# CT_ISL_V_0_16 is not set +# CT_ISL_V_0_15 is not set +# CT_ISL_V_0_11 is not set +CT_ISL_VERSION="0.27" +CT_ISL_MIRRORS="https://libisl.sourceforge.io" +CT_ISL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_ISL_SIGNATURE_FORMAT="" +CT_ISL_later_than_0_18=y +CT_ISL_0_18_or_later=y +CT_ISL_later_than_0_15=y +CT_ISL_0_15_or_later=y +# CT_COMP_LIBS_LIBELF is not set +CT_COMP_LIBS_LIBICONV=y +CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV" +CT_LIBICONV_DIR_NAME="libiconv" +CT_LIBICONV_PKG_NAME="libiconv" +CT_LIBICONV_SRC_RELEASE=y +# CT_LIBICONV_SRC_DEVEL is not set +CT_LIBICONV_PATCH_ORDER="global" 
+CT_LIBICONV_V_1_18=y +# CT_LIBICONV_V_1_16 is not set +# CT_LIBICONV_V_1_15 is not set +CT_LIBICONV_VERSION="1.18" +CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)" +CT_LIBICONV_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz" +CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_MPC=y +CT_COMP_LIBS_MPC_PKG_KSYM="MPC" +CT_MPC_DIR_NAME="mpc" +CT_MPC_PKG_NAME="mpc" +CT_MPC_SRC_RELEASE=y +# CT_MPC_SRC_DEVEL is not set +CT_MPC_PATCH_ORDER="global" +CT_MPC_V_1_3=y +CT_MPC_VERSION="1.3.1" +CT_MPC_MIRRORS="https://www.multiprecision.org/downloads $(CT_Mirrors GNU mpc)" +CT_MPC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_FORMATS=".tar.gz" +CT_MPC_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_MPFR=y +CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR" +CT_MPFR_DIR_NAME="mpfr" +CT_MPFR_PKG_NAME="mpfr" +CT_MPFR_SRC_RELEASE=y +# CT_MPFR_SRC_DEVEL is not set +CT_MPFR_PATCH_ORDER="global" +CT_MPFR_V_4_2=y +CT_MPFR_VERSION="4.2.2" +CT_MPFR_MIRRORS="https://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)" +CT_MPFR_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip" +CT_MPFR_SIGNATURE_FORMAT="packed/.asc" +CT_COMP_LIBS_NCURSES=y +CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES" +CT_NCURSES_DIR_NAME="ncurses" +CT_NCURSES_PKG_NAME="ncurses" +CT_NCURSES_SRC_RELEASE=y +# CT_NCURSES_SRC_DEVEL is not set +CT_NCURSES_PATCH_ORDER="global" +CT_NCURSES_V_6_5=y +# CT_NCURSES_V_6_4 is not set +# CT_NCURSES_V_6_2 is not set +# CT_NCURSES_V_6_1 is not set +# CT_NCURSES_V_6_0 is not set +CT_NCURSES_VERSION="6.5" +CT_NCURSES_MIRRORS="https://invisible-mirror.net/archives/ncurses $(CT_Mirrors GNU ncurses)" +CT_NCURSES_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_FORMATS=".tar.gz" 
+CT_NCURSES_SIGNATURE_FORMAT="packed/.asc" +CT_NCURSES_NEW_ABI=y +CT_NCURSES_HOST_CONFIG_ARGS="" +CT_NCURSES_HOST_DISABLE_DB=y +CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100" +CT_NCURSES_TARGET_CONFIG_ARGS="" +# CT_NCURSES_TARGET_DISABLE_DB is not set +CT_NCURSES_TARGET_FALLBACKS="" +CT_NCURSES_EXTRA_CFLAGS="-std=gnu17" +CT_COMP_LIBS_ZLIB=y +CT_COMP_LIBS_ZLIB_PKG_KSYM="ZLIB" +CT_ZLIB_DIR_NAME="zlib" +CT_ZLIB_PKG_NAME="zlib" +CT_ZLIB_SRC_RELEASE=y +# CT_ZLIB_SRC_DEVEL is not set +CT_ZLIB_PATCH_ORDER="global" +CT_ZLIB_V_1_3_1=y +# CT_ZLIB_V_1_2_13 is not set +CT_ZLIB_VERSION="1.3.1" +CT_ZLIB_MIRRORS="https://github.com/madler/zlib/releases/download/v${CT_ZLIB_VERSION} https://www.zlib.net/" +CT_ZLIB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_ZLIB_SIGNATURE_FORMAT="packed/.asc" +CT_COMP_LIBS_ZSTD=y +CT_COMP_LIBS_ZSTD_PKG_KSYM="ZSTD" +CT_ZSTD_DIR_NAME="zstd" +CT_ZSTD_PKG_NAME="zstd" +CT_ZSTD_SRC_RELEASE=y +# CT_ZSTD_SRC_DEVEL is not set +CT_ZSTD_PATCH_ORDER="global" +CT_ZSTD_V_1_5_7=y +# CT_ZSTD_V_1_5_6 is not set +# CT_ZSTD_V_1_5_5 is not set +# CT_ZSTD_V_1_5_2 is not set +CT_ZSTD_VERSION="1.5.7" +CT_ZSTD_MIRRORS="https://github.com/facebook/zstd/releases/download/v${CT_ZSTD_VERSION} https://downloads.sourceforge.net/project/zstandard.mirror/v${CT_ZSTD_VERSION}" +CT_ZSTD_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ZSTD_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ZSTD_ARCHIVE_FORMATS=".tar.gz" +CT_ZSTD_SIGNATURE_FORMAT="packed/.sig" +CT_ALL_COMP_LIBS_CHOICES="CLOOG EXPAT GETTEXT GMP GNUPRUMCU ISL LIBELF LIBICONV MPC MPFR NCURSES NEWLIB_NANO PICOLIBC ZLIB ZSTD" +CT_LIBICONV_NEEDED=y +CT_GETTEXT_NEEDED=y +CT_GMP_NEEDED=y +CT_MPFR_NEEDED=y +CT_ISL_NEEDED=y +CT_MPC_NEEDED=y +CT_NCURSES_NEEDED=y +CT_ZLIB_NEEDED=y +CT_ZSTD_NEEDED=y +CT_LIBICONV=y +CT_GETTEXT=y +CT_GMP=y +CT_MPFR=y +CT_ISL=y +CT_MPC=y +CT_NCURSES=y +CT_ZLIB=y +CT_ZSTD=y +# end of 
Companion libraries + +# +# Companion tools +# +# CT_COMP_TOOLS_FOR_HOST is not set +# CT_COMP_TOOLS_AUTOCONF is not set +# CT_COMP_TOOLS_AUTOMAKE is not set +# CT_COMP_TOOLS_BISON is not set +# CT_COMP_TOOLS_DTC is not set +# CT_COMP_TOOLS_LIBTOOL is not set +# CT_COMP_TOOLS_M4 is not set +# CT_COMP_TOOLS_MAKE is not set +CT_ALL_COMP_TOOLS_CHOICES="AUTOCONF AUTOMAKE BISON DTC LIBTOOL M4 MAKE" +# end of Companion tools diff --git a/src/_nfdos/aux/toolchain/toolchain_defconfig b/src/_nfdos/aux/toolchain/toolchain_defconfig new file mode 100644 index 0000000..f92d594 --- /dev/null +++ b/src/_nfdos/aux/toolchain/toolchain_defconfig @@ -0,0 +1,857 @@ +# +# Automatically generated file; DO NOT EDIT. +# crosstool-NG 1.28.0.1_403899e Configuration +# +CT_CONFIGURE_has_static_link=y +CT_CONFIGURE_has_cxx11=y +CT_CONFIGURE_has_wget=y +CT_CONFIGURE_has_curl=y +CT_CONFIGURE_has_meson=y +CT_CONFIGURE_has_ninja=y +CT_CONFIGURE_has_make_3_81_or_newer=y +CT_CONFIGURE_has_make_4_0_or_newer=y +CT_CONFIGURE_has_libtool_2_4_or_newer=y +CT_CONFIGURE_has_libtoolize_2_4_or_newer=y +CT_CONFIGURE_has_autoconf_2_65_or_newer=y +CT_CONFIGURE_has_autoreconf_2_65_or_newer=y +CT_CONFIGURE_has_automake_1_15_or_newer=y +CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y +CT_CONFIGURE_has_python_3_4_or_newer=y +CT_CONFIGURE_has_bison_2_7_or_newer=y +CT_CONFIGURE_has_bison_3_0_4_or_newer=y +CT_CONFIGURE_has_python=y +CT_CONFIGURE_has_git=y +CT_CONFIGURE_has_md5sum=y +CT_CONFIGURE_has_sha1sum=y +CT_CONFIGURE_has_sha256sum=y +CT_CONFIGURE_has_sha512sum=y +CT_CONFIGURE_has_install_with_strip_program=y +CT_VERSION="1.28.0.1_403899e" +CT_VCHECK="" +CT_CONFIG_VERSION_ENV="4" +CT_CONFIG_VERSION_CURRENT="4" +CT_CONFIG_VERSION="4" +CT_MODULES=y + +# +# Paths and misc options +# + +# +# crosstool-NG behavior +# +CT_OBSOLETE=y +# CT_EXPERIMENTAL is not set +# CT_DEBUG_CT is not set + +# +# Paths +# +CT_LOCAL_TARBALLS_DIR="${HOME}/src" +CT_SAVE_TARBALLS=y +# CT_TARBALLS_BUILDROOT_LAYOUT is not set 
+CT_WORK_DIR="${CT_TOP_DIR}/.build" +CT_BUILD_TOP_DIR="${CT_WORK_DIR:-${CT_TOP_DIR}/.build}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_BUILD_DIR="${CT_BUILD_TOP_DIR}/build" +CT_PREFIX_DIR="${CT_PREFIX:-${HOME}/x-tools}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_RM_RF_PREFIX_DIR=y +CT_REMOVE_DOCS=y +CT_INSTALL_LICENSES=y +CT_PREFIX_DIR_RO=y +CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y +# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set + +# +# Downloading +# +CT_DOWNLOAD_AGENT_WGET=y +# CT_DOWNLOAD_AGENT_CURL is not set +# CT_DOWNLOAD_AGENT_NONE is not set +# CT_FORBID_DOWNLOAD is not set +# CT_FORCE_DOWNLOAD is not set +CT_CONNECT_TIMEOUT=10 +CT_DOWNLOAD_WGET_OPTIONS="--tries=3 -nc --progress=dot:binary" +# CT_ONLY_DOWNLOAD is not set +# CT_USE_MIRROR is not set +CT_VERIFY_DOWNLOAD_DIGEST=y +CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y +# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set +# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set +CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512" +# CT_VERIFY_DOWNLOAD_SIGNATURE is not set + +# +# Extracting +# +# CT_FORCE_EXTRACT is not set +CT_OVERRIDE_CONFIG_GUESS_SUB=y +# CT_ONLY_EXTRACT is not set +CT_PATCH_BUNDLED=y +# CT_PATCH_BUNDLED_LOCAL is not set +CT_PATCH_ORDER="bundled" + +# +# Build behavior +# +CT_PARALLEL_JOBS=0 +CT_LOAD="" +CT_USE_PIPES=y +CT_EXTRA_CFLAGS_FOR_BUILD="" +CT_EXTRA_CXXFLAGS_FOR_BUILD="" +CT_EXTRA_LDFLAGS_FOR_BUILD="" +CT_EXTRA_CFLAGS_FOR_HOST="" +CT_EXTRA_LDFLAGS_FOR_HOST="" +# CT_CONFIG_SHELL_SH is not set +# CT_CONFIG_SHELL_ASH is not set +CT_CONFIG_SHELL_BASH=y +# CT_CONFIG_SHELL_CUSTOM is not set +CT_CONFIG_SHELL="${bash}" + +# +# Logging +# +# CT_LOG_ERROR is not set +# CT_LOG_WARN is not set +# CT_LOG_INFO is not set +CT_LOG_EXTRA=y +# CT_LOG_ALL is not set +# CT_LOG_DEBUG is not set +CT_LOG_LEVEL_MAX="EXTRA" +# CT_LOG_SEE_TOOLS_WARN is not set +CT_LOG_PROGRESS_BAR=y +CT_LOG_TO_FILE=y +CT_LOG_FILE_COMPRESS=y +# end of Paths and misc options + +# +# Target options +# +# 
CT_ARCH_ALPHA is not set +# CT_ARCH_ARC is not set +# CT_ARCH_ARM is not set +# CT_ARCH_AVR is not set +# CT_ARCH_BPF is not set +# CT_ARCH_M68K is not set +# CT_ARCH_MIPS is not set +# CT_ARCH_NIOS2 is not set +# CT_ARCH_POWERPC is not set +# CT_ARCH_PRU is not set +# CT_ARCH_RISCV is not set +# CT_ARCH_RX is not set +# CT_ARCH_S390 is not set +# CT_ARCH_SH is not set +# CT_ARCH_SPARC is not set +CT_ARCH_X86=y +# CT_ARCH_XTENSA is not set +CT_ARCH="x86" +CT_ARCH_CHOICE_KSYM="X86" +CT_ARCH_CPU="" +CT_ARCH_TUNE="" +CT_ARCH_X86_SHOW=y + +# +# Options for x86 +# +CT_ARCH_X86_PKG_KSYM="" +CT_ALL_ARCH_CHOICES="ALPHA ARC ARM AVR BPF C6X LM32 LOONGARCH M68K MICROBLAZE MIPS MOXIE MSP430 NIOS2 OPENRISC PARISC POWERPC PRU RISCV RX S390 SH SPARC TRICORE X86 XTENSA" +CT_ARCH_SUFFIX="" +# CT_OMIT_TARGET_VENDOR is not set + +# +# Generic target options +# +CT_MULTILIB=y +CT_ARCH_USE_MMU=y +CT_ARCH_SUPPORTS_LIBSANITIZER=y +CT_ARCH_SUPPORTS_32=y +CT_ARCH_SUPPORTS_64=y +CT_ARCH_DEFAULT_32=y +CT_ARCH_BITNESS=64 +# CT_ARCH_32 is not set +CT_ARCH_64=y +CT_ARCH_SUPPORTS_WITH_32_64=y + +# +# Target optimisations +# +CT_ARCH_SUPPORTS_WITH_ARCH=y +CT_ARCH_SUPPORTS_WITH_CPU=y +CT_ARCH_SUPPORTS_WITH_TUNE=y +CT_ARCH_ARCH="" +CT_TARGET_CFLAGS="" +CT_TARGET_LDFLAGS="" +# end of Target options + +# +# Toolchain options +# + +# +# General toolchain options +# +CT_USE_SYSROOT=y +CT_SYSROOT_NAME="sysroot" +CT_SYSROOT_DIR_PREFIX="" +CT_WANTS_STATIC_LINK=y +CT_WANTS_STATIC_LINK_CXX=y +# CT_STATIC_TOOLCHAIN is not set +CT_SHOW_CT_VERSION=y +CT_TOOLCHAIN_PKGVERSION="" +CT_TOOLCHAIN_BUGURL="" + +# +# Tuple completion and aliasing +# +CT_TARGET_VENDOR="nfdos" +CT_TARGET_ALIAS_SED_EXPR="" +CT_TARGET_ALIAS="" + +# +# Toolchain type +# +CT_CROSS=y +# CT_CANADIAN is not set +CT_TOOLCHAIN_TYPE="cross" + +# +# Build system +# +CT_BUILD="" +CT_BUILD_PREFIX="" +CT_BUILD_SUFFIX="" + +# +# Misc options +# +# CT_TOOLCHAIN_ENABLE_NLS is not set +# CT_TOOLCHAIN_CMAKE_TOOLCHAIN_FILE is not set +# end of Toolchain 
options + +# +# Operating System +# +CT_KERNEL_SUPPORTS_SHARED_LIBS=y +# CT_KERNEL_BARE_METAL is not set +CT_KERNEL_LINUX=y +CT_KERNEL="linux" +CT_KERNEL_CHOICE_KSYM="LINUX" +CT_KERNEL_LINUX_SHOW=y + +# +# Options for linux +# +CT_KERNEL_LINUX_PKG_KSYM="LINUX" +CT_LINUX_DIR_NAME="linux" +CT_LINUX_PKG_NAME="linux" +CT_LINUX_SRC_RELEASE=y +# CT_LINUX_SRC_DEVEL is not set +CT_LINUX_PATCH_ORDER="global" +CT_LINUX_V_6_16=y +# CT_LINUX_V_6_15 is not set +# CT_LINUX_V_6_14 is not set +# CT_LINUX_V_6_13 is not set +# CT_LINUX_V_6_12 is not set +# CT_LINUX_V_6_11 is not set +# CT_LINUX_V_6_10 is not set +# CT_LINUX_V_6_9 is not set +# CT_LINUX_V_6_8 is not set +# CT_LINUX_V_6_7 is not set +# CT_LINUX_V_6_6 is not set +# CT_LINUX_V_6_5 is not set +# CT_LINUX_V_6_4 is not set +# CT_LINUX_V_6_3 is not set +# CT_LINUX_V_6_2 is not set +# CT_LINUX_V_6_1 is not set +# CT_LINUX_V_6_0 is not set +# CT_LINUX_V_5_19 is not set +# CT_LINUX_V_5_18 is not set +# CT_LINUX_V_5_17 is not set +# CT_LINUX_V_5_16 is not set +# CT_LINUX_V_5_15 is not set +# CT_LINUX_V_5_14 is not set +# CT_LINUX_V_5_13 is not set +# CT_LINUX_V_5_12 is not set +# CT_LINUX_V_5_11 is not set +# CT_LINUX_V_5_10 is not set +# CT_LINUX_V_5_9 is not set +# CT_LINUX_V_5_8 is not set +# CT_LINUX_V_5_7 is not set +# CT_LINUX_V_5_5 is not set +# CT_LINUX_V_5_4 is not set +# CT_LINUX_V_5_3 is not set +# CT_LINUX_V_5_2 is not set +# CT_LINUX_V_5_1 is not set +# CT_LINUX_V_5_0 is not set +# CT_LINUX_V_4_20 is not set +# CT_LINUX_V_4_19 is not set +# CT_LINUX_V_4_18 is not set +# CT_LINUX_V_4_17 is not set +# CT_LINUX_V_4_16 is not set +# CT_LINUX_V_4_15 is not set +# CT_LINUX_V_4_14 is not set +# CT_LINUX_V_4_13 is not set +# CT_LINUX_V_4_12 is not set +# CT_LINUX_V_4_11 is not set +# CT_LINUX_V_4_10 is not set +# CT_LINUX_V_4_9 is not set +# CT_LINUX_V_4_4 is not set +# CT_LINUX_V_4_1 is not set +# CT_LINUX_V_3_18 is not set +# CT_LINUX_V_3_16 is not set +# CT_LINUX_V_3_13 is not set +# CT_LINUX_V_3_12 is not set +# 
CT_LINUX_V_3_10 is not set +# CT_LINUX_V_3_4 is not set +# CT_LINUX_V_3_2 is not set +# CT_LINUX_V_2_6_32 is not set +CT_LINUX_VERSION="6.16" +CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})" +CT_LINUX_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign" +CT_LINUX_later_than_5_19=y +CT_LINUX_5_19_or_later=y +CT_LINUX_later_than_5_12=y +CT_LINUX_5_12_or_later=y +CT_LINUX_later_than_5_5=y +CT_LINUX_5_5_or_later=y +CT_LINUX_later_than_5_3=y +CT_LINUX_5_3_or_later=y +CT_LINUX_later_than_4_8=y +CT_LINUX_4_8_or_later=y +CT_LINUX_later_than_3_7=y +CT_LINUX_3_7_or_later=y +CT_LINUX_later_than_3_2=y +CT_LINUX_3_2_or_later=y +CT_KERNEL_has_rsync=y +CT_KERNEL_DEP_RSYNC=y +CT_KERNEL_LINUX_VERBOSITY_0=y +# CT_KERNEL_LINUX_VERBOSITY_1 is not set +# CT_KERNEL_LINUX_VERBOSITY_2 is not set +CT_KERNEL_LINUX_VERBOSE_LEVEL=0 +CT_ALL_KERNEL_CHOICES="BARE_METAL LINUX WINDOWS" + +# +# Common kernel options +# +CT_SHARED_LIBS=y +# end of Operating System + +# +# Binary utilities +# +CT_ARCH_BINFMT_ELF=y +CT_BINUTILS_BINUTILS=y +CT_BINUTILS="binutils" +CT_BINUTILS_CHOICE_KSYM="BINUTILS" +CT_BINUTILS_BINUTILS_SHOW=y + +# +# Options for binutils +# +CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS" +CT_BINUTILS_DIR_NAME="binutils" +CT_BINUTILS_USE_GNU=y +# CT_BINUTILS_USE_ORACLE is not set +CT_BINUTILS_USE="BINUTILS" +CT_BINUTILS_PKG_NAME="binutils" +CT_BINUTILS_SRC_RELEASE=y +# CT_BINUTILS_SRC_DEVEL is not set +CT_BINUTILS_PATCH_ORDER="global" +CT_BINUTILS_V_2_45=y +# CT_BINUTILS_V_2_44 is not set +# CT_BINUTILS_V_2_43 is not set +# CT_BINUTILS_V_2_42 is not set +# CT_BINUTILS_V_2_41 is not set +# CT_BINUTILS_V_2_40 is not set +# CT_BINUTILS_V_2_39 is not set +# CT_BINUTILS_V_2_38 is not set +# CT_BINUTILS_V_2_37 is not set +# CT_BINUTILS_V_2_36 is not set +# CT_BINUTILS_V_2_35 is not set +# CT_BINUTILS_V_2_34 is not set +# CT_BINUTILS_V_2_33 is not set 
+# CT_BINUTILS_V_2_32 is not set +# CT_BINUTILS_V_2_31 is not set +# CT_BINUTILS_V_2_30 is not set +# CT_BINUTILS_V_2_29 is not set +# CT_BINUTILS_V_2_28 is not set +# CT_BINUTILS_V_2_27 is not set +# CT_BINUTILS_V_2_26 is not set +CT_BINUTILS_VERSION="2.45" +CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)" +CT_BINUTILS_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_BINUTILS_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig" +CT_BINUTILS_2_45_or_later=y +CT_BINUTILS_2_45_or_older=y +CT_BINUTILS_later_than_2_44=y +CT_BINUTILS_2_44_or_later=y +CT_BINUTILS_later_than_2_41=y +CT_BINUTILS_2_41_or_later=y +CT_BINUTILS_later_than_2_39=y +CT_BINUTILS_2_39_or_later=y +CT_BINUTILS_later_than_2_30=y +CT_BINUTILS_2_30_or_later=y +CT_BINUTILS_later_than_2_27=y +CT_BINUTILS_2_27_or_later=y +CT_BINUTILS_later_than_2_26=y +CT_BINUTILS_2_26_or_later=y + +# +# GNU binutils +# +CT_BINUTILS_GOLD_SUPPORTS_ARCH=y +CT_BINUTILS_LINKER_LD=y +CT_BINUTILS_LINKERS_LIST="ld" +CT_BINUTILS_LINKER_DEFAULT="bfd" +# CT_BINUTILS_PLUGINS is not set +CT_BINUTILS_RELRO=m +CT_BINUTILS_DETERMINISTIC_ARCHIVES=y +CT_BINUTILS_EXTRA_CONFIG_ARRAY="" +# CT_BINUTILS_FOR_TARGET is not set +# CT_BINUTILS_GPROFNG is not set +CT_ALL_BINUTILS_CHOICES="BINUTILS" +# end of Binary utilities + +# +# C-library +# +# CT_LIBC_GLIBC is not set +CT_LIBC_MUSL=y +# CT_LIBC_UCLIBC_NG is not set +CT_LIBC="musl" +CT_LIBC_CHOICE_KSYM="MUSL" +CT_THREADS="musl" +CT_LIBC_MUSL_SHOW=y + +# +# Options for musl +# +CT_LIBC_MUSL_PKG_KSYM="MUSL" +CT_MUSL_DIR_NAME="musl" +CT_MUSL_PKG_NAME="musl" +CT_MUSL_SRC_RELEASE=y +# CT_MUSL_SRC_DEVEL is not set +CT_MUSL_PATCH_ORDER="global" +CT_MUSL_V_1_2_5=y +# CT_MUSL_V_1_2_4 is not set +# CT_MUSL_V_1_2_3 is not set +# CT_MUSL_V_1_2_2 is not set +# CT_MUSL_V_1_2_1 is not set +# CT_MUSL_V_1_1_24 is not set +# CT_MUSL_V_1_1_23 is not set +# CT_MUSL_V_1_1_22 is not set +# 
CT_MUSL_V_1_1_21 is not set +# CT_MUSL_V_1_1_20 is not set +# CT_MUSL_V_1_1_19 is not set +# CT_MUSL_V_1_1_18 is not set +# CT_MUSL_V_1_1_17 is not set +# CT_MUSL_V_1_1_16 is not set +CT_MUSL_VERSION="1.2.5" +CT_MUSL_MIRRORS="https://www.musl-libc.org/releases" +CT_MUSL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MUSL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MUSL_ARCHIVE_FORMATS=".tar.gz" +CT_MUSL_SIGNATURE_FORMAT="packed/.asc" +# CT_LIBC_MUSL_DEBUG is not set +# CT_LIBC_MUSL_WARNINGS is not set +# CT_LIBC_MUSL_OPTIMIZE_NONE is not set +CT_LIBC_MUSL_OPTIMIZE_AUTO=y +# CT_LIBC_MUSL_OPTIMIZE_SPEED is not set +# CT_LIBC_MUSL_OPTIMIZE_SIZE is not set +CT_LIBC_MUSL_OPTIMIZE="auto" +CT_LIBC_MUSL_EXTRA_CFLAGS="" +CT_ALL_LIBC_CHOICES="AVR_LIBC GLIBC MINGW_W64 MOXIEBOX MUSL NEWLIB NONE PICOLIBC UCLIBC_NG" +CT_LIBC_SUPPORT_THREADS_ANY=y +CT_LIBC_SUPPORT_THREADS_NATIVE=y + +# +# Common C library options +# +CT_THREADS_NATIVE=y +CT_CREATE_LDSO_CONF=y +CT_LDSO_CONF_EXTRA_DIRS_ARRAY="" +CT_LIBC_XLDD=y +# end of C-library + +# +# C compiler +# +CT_CC_CORE_NEEDED=y +CT_CC_SUPPORT_CXX=y +CT_CC_SUPPORT_FORTRAN=y +CT_CC_SUPPORT_ADA=y +CT_CC_SUPPORT_D=y +CT_CC_SUPPORT_JIT=y +CT_CC_SUPPORT_OBJC=y +CT_CC_SUPPORT_OBJCXX=y +CT_CC_SUPPORT_GOLANG=y +CT_CC_GCC=y +CT_CC="gcc" +CT_CC_CHOICE_KSYM="GCC" +CT_CC_GCC_SHOW=y + +# +# Options for gcc +# +CT_CC_GCC_PKG_KSYM="GCC" +CT_GCC_DIR_NAME="gcc" +CT_GCC_USE_GNU=y +# CT_GCC_USE_ORACLE is not set +CT_GCC_USE="GCC" +CT_GCC_PKG_NAME="gcc" +CT_GCC_SRC_RELEASE=y +# CT_GCC_SRC_DEVEL is not set +CT_GCC_PATCH_ORDER="global" +CT_GCC_V_15=y +# CT_GCC_V_14 is not set +# CT_GCC_V_13 is not set +# CT_GCC_V_12 is not set +# CT_GCC_V_11 is not set +# CT_GCC_V_10 is not set +# CT_GCC_V_9 is not set +# CT_GCC_V_8 is not set +# CT_GCC_V_7 is not set +# CT_GCC_V_6 is not set +# CT_GCC_V_5 is not set +# CT_GCC_V_4_9 is not set +CT_GCC_VERSION="15.2.0" +CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware 
gcc/releases/gcc-${CT_GCC_VERSION})" +CT_GCC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GCC_SIGNATURE_FORMAT="" +CT_GCC_later_than_15=y +CT_GCC_15_or_later=y +CT_GCC_later_than_14=y +CT_GCC_14_or_later=y +CT_GCC_later_than_13=y +CT_GCC_13_or_later=y +CT_GCC_later_than_12=y +CT_GCC_12_or_later=y +CT_GCC_later_than_11=y +CT_GCC_11_or_later=y +CT_GCC_later_than_10=y +CT_GCC_10_or_later=y +CT_GCC_later_than_9=y +CT_GCC_9_or_later=y +CT_GCC_later_than_8=y +CT_GCC_8_or_later=y +CT_GCC_later_than_7=y +CT_GCC_7_or_later=y +CT_GCC_later_than_6=y +CT_GCC_6_or_later=y +CT_GCC_later_than_5=y +CT_GCC_5_or_later=y +CT_GCC_later_than_4_9=y +CT_GCC_4_9_or_later=y +CT_CC_GCC_HAS_LIBMPX=y +CT_CC_GCC_ENABLE_CXX_FLAGS="" +CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" +CT_CC_GCC_EXTRA_CONFIG_ARRAY="" +CT_CC_GCC_MULTILIB_LIST="" +CT_CC_GCC_STATIC_LIBSTDCXX=y +# CT_CC_GCC_SYSTEM_ZLIB is not set +CT_CC_GCC_CONFIG_TLS=m + +# +# Optimisation features +# +CT_CC_GCC_USE_GRAPHITE=y +CT_CC_GCC_USE_LTO=y +CT_CC_GCC_LTO_ZSTD=m + +# +# Settings for libraries running on target +# +# CT_CC_GCC_ENABLE_DEFAULT_PIE is not set +CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y +CT_CC_GCC_LIBSTDCXX=m +# CT_CC_GCC_LIBSTDCXX_HOSTED_DISABLE is not set +CT_CC_GCC_LIBSTDCXX_TARGET_CXXFLAGS="" +# CT_CC_GCC_LIBMUDFLAP is not set +# CT_CC_GCC_LIBGOMP is not set +# CT_CC_GCC_LIBSSP is not set +# CT_CC_GCC_LIBQUADMATH is not set +CT_CC_GCC_LIBSTDCXX_VERBOSE=m + +# +# Misc. obscure options. 
+# +CT_CC_CXA_ATEXIT=y +CT_CC_GCC_TM_CLONE_REGISTRY=m +# CT_CC_GCC_DISABLE_PCH is not set +CT_CC_GCC_SJLJ_EXCEPTIONS=m +CT_CC_GCC_LDBL_128=m +# CT_CC_GCC_BUILD_ID is not set +CT_CC_GCC_LNK_HASH_STYLE_DEFAULT=y +# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set +# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set +# CT_CC_GCC_LNK_HASH_STYLE_BOTH is not set +CT_CC_GCC_LNK_HASH_STYLE="" +CT_CC_GCC_DEC_FLOATS_AUTO=y +# CT_CC_GCC_DEC_FLOATS_BID is not set +# CT_CC_GCC_DEC_FLOATS_DPD is not set +# CT_CC_GCC_DEC_FLOATS_NO is not set +CT_CC_GCC_DEC_FLOATS="" +CT_ALL_CC_CHOICES="GCC" + +# +# Additional supported languages: +# +CT_CC_LANG_CXX=y +# CT_CC_LANG_FORTRAN is not set +# end of C compiler + +# +# Linkers +# + +# +# BFD enabled in binutils +# +# CT_LINKER_MOLD is not set +CT_ALL_LINKER_CHOICES="MOLD" +# end of Linkers + +# +# Debug facilities +# +# CT_DEBUG_DUMA is not set +# CT_DEBUG_GDB is not set +# CT_DEBUG_LTRACE is not set +# CT_DEBUG_STRACE is not set +CT_ALL_DEBUG_CHOICES="DUMA GDB LTRACE STRACE" +# end of Debug facilities + +# +# Companion libraries +# +# CT_COMPLIBS_CHECK is not set +# CT_COMP_LIBS_CLOOG is not set +# CT_COMP_LIBS_EXPAT is not set +CT_COMP_LIBS_GETTEXT=y +CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT" +CT_GETTEXT_DIR_NAME="gettext" +CT_GETTEXT_PKG_NAME="gettext" +CT_GETTEXT_SRC_RELEASE=y +# CT_GETTEXT_SRC_DEVEL is not set +CT_GETTEXT_PATCH_ORDER="global" +CT_GETTEXT_V_0_26=y +# CT_GETTEXT_V_0_23_1 is not set +# CT_GETTEXT_V_0_22_5 is not set +# CT_GETTEXT_V_0_21 is not set +# CT_GETTEXT_V_0_20_1 is not set +# CT_GETTEXT_V_0_19_8_1 is not set +CT_GETTEXT_VERSION="0.26" +CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)" +CT_GETTEXT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig" +CT_GETTEXT_later_than_0_23=y +CT_GETTEXT_0_23_or_later=y +CT_GETTEXT_later_than_0_21=y +CT_GETTEXT_0_21_or_later=y +CT_GETTEXT_INCOMPATIBLE_WITH_UCLIBC_NG=y + 
+# +# This version of gettext is not compatible with uClibc-NG. Select +# + +# +# a different version if uClibc-NG is used on the target or (in a +# + +# +# Canadian cross build) on the host. +# +CT_COMP_LIBS_GMP=y +CT_COMP_LIBS_GMP_PKG_KSYM="GMP" +CT_GMP_DIR_NAME="gmp" +CT_GMP_PKG_NAME="gmp" +CT_GMP_SRC_RELEASE=y +# CT_GMP_SRC_DEVEL is not set +CT_GMP_PATCH_ORDER="global" +CT_GMP_V_6_3=y +# CT_GMP_V_6_2 is not set +# CT_GMP_V_6_1 is not set +CT_GMP_VERSION="6.3.0" +CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)" +CT_GMP_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2" +CT_GMP_SIGNATURE_FORMAT="packed/.sig" +CT_GMP_EXTRA_CFLAGS="-std=gnu17" +CT_COMP_LIBS_ISL=y +CT_COMP_LIBS_ISL_PKG_KSYM="ISL" +CT_ISL_DIR_NAME="isl" +CT_ISL_PKG_NAME="isl" +CT_ISL_SRC_RELEASE=y +# CT_ISL_SRC_DEVEL is not set +CT_ISL_PATCH_ORDER="global" +CT_ISL_V_0_27=y +# CT_ISL_V_0_26 is not set +# CT_ISL_V_0_25 is not set +# CT_ISL_V_0_24 is not set +# CT_ISL_V_0_23 is not set +# CT_ISL_V_0_22 is not set +# CT_ISL_V_0_21 is not set +# CT_ISL_V_0_20 is not set +# CT_ISL_V_0_19 is not set +# CT_ISL_V_0_18 is not set +# CT_ISL_V_0_17 is not set +# CT_ISL_V_0_16 is not set +# CT_ISL_V_0_15 is not set +# CT_ISL_V_0_11 is not set +CT_ISL_VERSION="0.27" +CT_ISL_MIRRORS="https://libisl.sourceforge.io" +CT_ISL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" +CT_ISL_SIGNATURE_FORMAT="" +CT_ISL_later_than_0_18=y +CT_ISL_0_18_or_later=y +CT_ISL_later_than_0_15=y +CT_ISL_0_15_or_later=y +# CT_COMP_LIBS_LIBELF is not set +CT_COMP_LIBS_LIBICONV=y +CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV" +CT_LIBICONV_DIR_NAME="libiconv" +CT_LIBICONV_PKG_NAME="libiconv" +CT_LIBICONV_SRC_RELEASE=y +# CT_LIBICONV_SRC_DEVEL is not set +CT_LIBICONV_PATCH_ORDER="global" 
+CT_LIBICONV_V_1_18=y +# CT_LIBICONV_V_1_16 is not set +# CT_LIBICONV_V_1_15 is not set +CT_LIBICONV_VERSION="1.18" +CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)" +CT_LIBICONV_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz" +CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_MPC=y +CT_COMP_LIBS_MPC_PKG_KSYM="MPC" +CT_MPC_DIR_NAME="mpc" +CT_MPC_PKG_NAME="mpc" +CT_MPC_SRC_RELEASE=y +# CT_MPC_SRC_DEVEL is not set +CT_MPC_PATCH_ORDER="global" +CT_MPC_V_1_3=y +CT_MPC_VERSION="1.3.1" +CT_MPC_MIRRORS="https://www.multiprecision.org/downloads $(CT_Mirrors GNU mpc)" +CT_MPC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPC_ARCHIVE_FORMATS=".tar.gz" +CT_MPC_SIGNATURE_FORMAT="packed/.sig" +CT_COMP_LIBS_MPFR=y +CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR" +CT_MPFR_DIR_NAME="mpfr" +CT_MPFR_PKG_NAME="mpfr" +CT_MPFR_SRC_RELEASE=y +# CT_MPFR_SRC_DEVEL is not set +CT_MPFR_PATCH_ORDER="global" +CT_MPFR_V_4_2=y +CT_MPFR_VERSION="4.2.2" +CT_MPFR_MIRRORS="https://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)" +CT_MPFR_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip" +CT_MPFR_SIGNATURE_FORMAT="packed/.asc" +CT_COMP_LIBS_NCURSES=y +CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES" +CT_NCURSES_DIR_NAME="ncurses" +CT_NCURSES_PKG_NAME="ncurses" +CT_NCURSES_SRC_RELEASE=y +# CT_NCURSES_SRC_DEVEL is not set +CT_NCURSES_PATCH_ORDER="global" +CT_NCURSES_V_6_5=y +# CT_NCURSES_V_6_4 is not set +# CT_NCURSES_V_6_2 is not set +# CT_NCURSES_V_6_1 is not set +# CT_NCURSES_V_6_0 is not set +CT_NCURSES_VERSION="6.5" +CT_NCURSES_MIRRORS="https://invisible-mirror.net/archives/ncurses $(CT_Mirrors GNU ncurses)" +CT_NCURSES_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_NCURSES_ARCHIVE_FORMATS=".tar.gz" 
+CT_NCURSES_SIGNATURE_FORMAT="packed/.asc" +CT_NCURSES_NEW_ABI=y +CT_NCURSES_HOST_CONFIG_ARGS="" +CT_NCURSES_HOST_DISABLE_DB=y +CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100" +CT_NCURSES_TARGET_CONFIG_ARGS="" +# CT_NCURSES_TARGET_DISABLE_DB is not set +CT_NCURSES_TARGET_FALLBACKS="" +CT_NCURSES_EXTRA_CFLAGS="-std=gnu17" +CT_COMP_LIBS_ZLIB=y +CT_COMP_LIBS_ZLIB_PKG_KSYM="ZLIB" +CT_ZLIB_DIR_NAME="zlib" +CT_ZLIB_PKG_NAME="zlib" +CT_ZLIB_SRC_RELEASE=y +# CT_ZLIB_SRC_DEVEL is not set +CT_ZLIB_PATCH_ORDER="global" +CT_ZLIB_V_1_3_1=y +# CT_ZLIB_V_1_2_13 is not set +CT_ZLIB_VERSION="1.3.1" +CT_ZLIB_MIRRORS="https://github.com/madler/zlib/releases/download/v${CT_ZLIB_VERSION} https://www.zlib.net/" +CT_ZLIB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ZLIB_ARCHIVE_FORMATS=".tar.xz .tar.gz" +CT_ZLIB_SIGNATURE_FORMAT="packed/.asc" +CT_COMP_LIBS_ZSTD=y +CT_COMP_LIBS_ZSTD_PKG_KSYM="ZSTD" +CT_ZSTD_DIR_NAME="zstd" +CT_ZSTD_PKG_NAME="zstd" +CT_ZSTD_SRC_RELEASE=y +# CT_ZSTD_SRC_DEVEL is not set +CT_ZSTD_PATCH_ORDER="global" +CT_ZSTD_V_1_5_7=y +# CT_ZSTD_V_1_5_6 is not set +# CT_ZSTD_V_1_5_5 is not set +# CT_ZSTD_V_1_5_2 is not set +CT_ZSTD_VERSION="1.5.7" +CT_ZSTD_MIRRORS="https://github.com/facebook/zstd/releases/download/v${CT_ZSTD_VERSION} https://downloads.sourceforge.net/project/zstandard.mirror/v${CT_ZSTD_VERSION}" +CT_ZSTD_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_ZSTD_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_ZSTD_ARCHIVE_FORMATS=".tar.gz" +CT_ZSTD_SIGNATURE_FORMAT="packed/.sig" +CT_ALL_COMP_LIBS_CHOICES="CLOOG EXPAT GETTEXT GMP GNUPRUMCU ISL LIBELF LIBICONV MPC MPFR NCURSES NEWLIB_NANO PICOLIBC ZLIB ZSTD" +CT_LIBICONV_NEEDED=y +CT_GETTEXT_NEEDED=y +CT_GMP_NEEDED=y +CT_MPFR_NEEDED=y +CT_ISL_NEEDED=y +CT_MPC_NEEDED=y +CT_NCURSES_NEEDED=y +CT_ZLIB_NEEDED=y +CT_ZSTD_NEEDED=y +CT_LIBICONV=y +CT_GETTEXT=y +CT_GMP=y +CT_MPFR=y +CT_ISL=y +CT_MPC=y +CT_NCURSES=y +CT_ZLIB=y +CT_ZSTD=y +# end of 
Companion libraries + +# +# Companion tools +# +# CT_COMP_TOOLS_FOR_HOST is not set +# CT_COMP_TOOLS_AUTOCONF is not set +# CT_COMP_TOOLS_AUTOMAKE is not set +# CT_COMP_TOOLS_BISON is not set +# CT_COMP_TOOLS_DTC is not set +# CT_COMP_TOOLS_LIBTOOL is not set +# CT_COMP_TOOLS_M4 is not set +# CT_COMP_TOOLS_MAKE is not set +CT_ALL_COMP_TOOLS_CHOICES="AUTOCONF AUTOMAKE BISON DTC LIBTOOL M4 MAKE" +# end of Companion tools diff --git a/src/_nfdos/bootloader/build/iso/boot/grub/grub.cfg b/src/_nfdos/bootloader/build/iso/boot/grub/grub.cfg new file mode 100644 index 0000000..85afe05 --- /dev/null +++ b/src/_nfdos/bootloader/build/iso/boot/grub/grub.cfg @@ -0,0 +1,7 @@ +set timeout=0 +set default=0 + +menuentry "NFDOS Kernel" { + multiboot /boot/nfdos.elf + boot +} diff --git a/src/_nfdos/bootloader/build/iso/boot/nfdos.elf b/src/_nfdos/bootloader/build/iso/boot/nfdos.elf new file mode 100755 index 0000000..6f3af8a Binary files /dev/null and b/src/_nfdos/bootloader/build/iso/boot/nfdos.elf differ diff --git a/src/_nfdos/bootloader/grub.cfg b/src/_nfdos/bootloader/grub.cfg new file mode 100644 index 0000000..85afe05 --- /dev/null +++ b/src/_nfdos/bootloader/grub.cfg @@ -0,0 +1,7 @@ +set timeout=0 +set default=0 + +menuentry "NFDOS Kernel" { + multiboot /boot/nfdos.elf + boot +} diff --git a/src/_nfdos/init b/src/_nfdos/init new file mode 100755 index 0000000..5f05f9e --- /dev/null +++ b/src/_nfdos/init @@ -0,0 +1,26 @@ +#!/bin/sh + +mount -t proc proc /proc +mount -t sysfs sys /sys +mount -t devtmpfs devtmpfs /dev + +if [ -f /opt/kernel/neurotron/neurotron_main.py ]; then + export PYTHONHOME=/usr + export PYTHONPATH=/usr/lib/python3.13:/usr/lib/python3.13/site-packages + export PATH=/sbin:/bin:/usr/sbin:/usr/bin + + echo '👉 Inicializando hipocampo físico...' + /usr/bin/python3 /opt/kernel/neurotron/neurotron_core/disk_init.py + + echo '👉 Inicializando o Neurotron...' 
+ /usr/bin/python3 /opt/kernel/neurotron/neurotron_main.py || echo "⚠️ Neurotron falhou" & + + sleep 5 + + echo '👉 Inicializando Painel de Telemetria do Neurotron...' + /usr/bin/python3 /opt/kernel/neurotron/neurotron_core/telemetry_tail.py +else + echo '⚙️ BusyBox ativo — Neurotron ausente.' +fi + +exec /bin/sh diff --git a/src/_nfdos/iso/boot/bzImage b/src/_nfdos/iso/boot/bzImage new file mode 100644 index 0000000..7fb1246 Binary files /dev/null and b/src/_nfdos/iso/boot/bzImage differ diff --git a/src/_nfdos/iso/boot/grub/grub.cfg b/src/_nfdos/iso/boot/grub/grub.cfg new file mode 100644 index 0000000..1300c05 --- /dev/null +++ b/src/_nfdos/iso/boot/grub/grub.cfg @@ -0,0 +1,21 @@ +# NFDOS GRUB2 configuration +set timeout=3 +set default=0 + +# Parâmetros padrão do kernel (ajustáveis) +set kernelopts="console=ttyS0 earlyprintk=serial,ttyS0,115200 keep_bootcon loglevel=8" + +menuentry 'NFDOS Linux (Serial Console)' { + linux /boot/bzImage root=/dev/ram0 ${kernelopts} + initrd /boot/initramfs.cpio.gz +} + +menuentry 'NFDOS Linux (Serial Console + Force Format)' { + linux /boot/bzImage root=/dev/ram0 ${kernelopts} nfdos_force_format=1 + initrd /boot/initramfs.cpio.gz +} + +menuentry 'NFDOS Linux (Standard VGA)' { + linux /boot/bzImage root=/dev/ram0 quiet ${kernelopts} + initrd /boot/initramfs.cpio.gz +} diff --git a/src/_nfdos/kernel/boot.S b/src/_nfdos/kernel/boot.S new file mode 100644 index 0000000..7dfc62f --- /dev/null +++ b/src/_nfdos/kernel/boot.S @@ -0,0 +1,27 @@ +/* Entrada em 32-bit (GRUB carrega em protected mode) */ +.intel_syntax noprefix +.section .text +.code32 +.global _start +.extern kernel_main + +/* pilha simples */ +.section .bss +.align 16 +.global _stack_top +_stack_bottom: + .skip 16384 /* 16 KiB */ +_stack_top: + +.section .text +_start: + /* setar a pilha */ + mov esp, offset _stack_top + + /* chamar o C */ + call kernel_main + +halt: + cli + hlt + jmp halt diff --git a/src/_nfdos/kernel/boot.o b/src/_nfdos/kernel/boot.o new file mode 100644 
index 0000000..abd8304 Binary files /dev/null and b/src/_nfdos/kernel/boot.o differ diff --git a/src/_nfdos/kernel/kernel.elf b/src/_nfdos/kernel/kernel.elf new file mode 100755 index 0000000..6f3af8a Binary files /dev/null and b/src/_nfdos/kernel/kernel.elf differ diff --git a/src/_nfdos/kernel/kmain.c b/src/_nfdos/kernel/kmain.c new file mode 100644 index 0000000..1dfa960 --- /dev/null +++ b/src/_nfdos/kernel/kmain.c @@ -0,0 +1,13 @@ +#include +#include +#include "vga.h" + +enum { COLOR_LIGHT_GREY = 0x07, COLOR_GREEN = 0x0A, COLOR_CYAN = 0x0B }; + +void kernel_main(void) { + vga_clear(COLOR_LIGHT_GREY); + vga_write_str("NFDOS Kernel iniciado!\n", COLOR_GREEN); + vga_write_str("Boot via GRUB (Multiboot v1), modo 32-bit OK.\n", COLOR_CYAN); + + for(;;) { __asm__ __volatile__("hlt"); } +} diff --git a/src/_nfdos/kernel/kmain.o b/src/_nfdos/kernel/kmain.o new file mode 100644 index 0000000..440717e Binary files /dev/null and b/src/_nfdos/kernel/kmain.o differ diff --git a/src/_nfdos/kernel/linker.ld b/src/_nfdos/kernel/linker.ld new file mode 100644 index 0000000..6dac3dc --- /dev/null +++ b/src/_nfdos/kernel/linker.ld @@ -0,0 +1,37 @@ +OUTPUT_FORMAT(elf32-i386) +OUTPUT_ARCH(i386) +ENTRY(_start) + +SECTIONS +{ + /* Carregar o kernel a partir do 1MB */ + . 
= 0x00100000; + + /* A header Multiboot tem de estar nas primeiras 8KB do binário */ + .multiboot : + { + KEEP(*(.multiboot)) + KEEP(*(.multiboot.*)) + } + + .text : + { + *(.text .text.*) + } + + .rodata : + { + *(.rodata .rodata.*) + } + + .data : + { + *(.data .data.*) + } + + .bss : + { + *(COMMON) + *(.bss .bss.*) + } +} diff --git a/src/_nfdos/kernel/multiboot_header.S b/src/_nfdos/kernel/multiboot_header.S new file mode 100644 index 0000000..524a017 --- /dev/null +++ b/src/_nfdos/kernel/multiboot_header.S @@ -0,0 +1,10 @@ +/* Multiboot v1 header simples (GRUB) */ +.section .multiboot +.align 4 +.set MB_MAGIC, 0x1BADB002 +.set MB_FLAGS, 0x00000001 /* align modules on page boundaries */ +.set MB_CHECK, -(MB_MAGIC + MB_FLAGS) + +.long MB_MAGIC +.long MB_FLAGS +.long MB_CHECK diff --git a/src/_nfdos/kernel/multiboot_header.o b/src/_nfdos/kernel/multiboot_header.o new file mode 100644 index 0000000..dd0f201 Binary files /dev/null and b/src/_nfdos/kernel/multiboot_header.o differ diff --git a/src/_nfdos/kernel/neurotron/__init__.py b/src/_nfdos/kernel/neurotron/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/_nfdos/kernel/neurotron/neurotron_core/autodiagnostic.py b/src/_nfdos/kernel/neurotron/neurotron_core/autodiagnostic.py new file mode 100644 index 0000000..0646a59 --- /dev/null +++ b/src/_nfdos/kernel/neurotron/neurotron_core/autodiagnostic.py @@ -0,0 +1,185 @@ +from __future__ import annotations +import json, os +from datetime import datetime, timezone +from rich.console import Console +from rich.table import Table +from pathlib import Path + +from .neurotron_config import ( + NEUROTRON_DATASET_PATH, NEUROTRON_HISTORY_KEEP, NEUROTRON_DIAG_SCHEMA, + HOMEOSTASIS_CPU_WARN, HOMEOSTASIS_CPU_ALERT, + HOMEOSTASIS_MEM_WARN, HOMEOSTASIS_MEM_ALERT, + HOMEOSTASIS_LOAD_WARN, HOMEOSTASIS_LOAD_ALERT, +) +from .perception import Perception + +console = Console() + +def _now_iso(): + return datetime.now(timezone.utc).isoformat() + +class 
AutoDiagnostic: + def __init__(self, runtime_dir: str, log_dir: str): + self.runtime_dir = runtime_dir + self.log_dir = log_dir + self.data_dir = Path(NEUROTRON_DATASET_PATH) + self.data_dir.mkdir(parents=True, exist_ok=True) + self.last_file = self.data_dir / "last_diagnostic.json" + + self.perception = Perception() + self.current = None + self.previous = None + + def _load_previous(self): + if not self.last_file.exists(): + return None + try: + with open(self.last_file, "r") as f: + return json.load(f) + except Exception: + return None + + def _save_current(self, payload: dict): + history = [] + if self.last_file.exists(): + try: + with open(self.last_file, "r") as f: + prev = json.load(f) + history = prev.get("history", []) + history.append({ + "timestamp": prev.get("timestamp"), + "cpu_percent": prev.get("cpu_percent"), + "mem_percent": prev.get("mem_percent"), + "loadavg": prev.get("loadavg"), + "state": prev.get("state", "UNKNOWN"), + }) + history = history[-NEUROTRON_HISTORY_KEEP:] + except Exception: + history = [] + payload["history"] = history + with open(self.last_file, "w") as f: + json.dump(payload, f, indent=2) + + def _classify_state(self, cpu, mem, l1): + # valores podem ser "?" + try: + cpu = float(cpu) + mem = float(mem) + l1 = float(l1) + except Exception: + return "UNKNOWN" + + # ALERT/CRITICAL + if cpu >= HOMEOSTASIS_CPU_ALERT or mem >= HOMEOSTASIS_MEM_ALERT or l1 >= HOMEOSTASIS_LOAD_ALERT: + return "CRITICAL" + if cpu >= HOMEOSTASIS_CPU_WARN or mem >= HOMEOSTASIS_MEM_WARN or l1 >= HOMEOSTASIS_LOAD_WARN: + return "ALERT" + + # OKs + return "STABLE" + + def _delta(self, a, b): + try: + if isinstance(a, list) and isinstance(b, list) and len(a) == len(b): + return [round(float(x) - float(y), 2) for x, y in zip(a, b)] + return round(float(a) - float(b), 2) + except Exception: + return "?" 
+ + def _render_mini_trend(self, values, width=24, charset="▁▂▃▄▅▆▇█"): + if not values: + return "" + lo = min(values); hi = max(values) + if not isinstance(lo, (int, float)) or not isinstance(hi, (int, float)): + return "" + span = (hi - lo) or 1.0 + levels = len(charset) - 1 + bars = [] + for v in values[-width:]: + if not isinstance(v, (int, float)): + bars.append("·") + continue + i = int(round((v - lo) / span * levels)) + bars.append(charset[i]) + return "".join(bars) + + def run_exam(self): + console.print("\n[bold]🤖 Iniciando rotina de Auto-Diagnóstico Evolutivo...[/bold]\n") + + snap = self.perception.snapshot() + cpu = snap.get("cpu_percent", "?") + mem = snap.get("mem_percent", "?") + load = snap.get("loadavg", ["?", "?", "?"]) + + prev = self._load_previous() + self.previous = prev + + # deltas + cpu_prev = prev.get("cpu_percent") if prev else "?" + mem_prev = prev.get("mem_percent") if prev else "?" + load_prev = prev.get("loadavg") if prev else ["?", "?", "?"] + + d_cpu = self._delta(cpu, cpu_prev) + d_mem = self._delta(mem, mem_prev) + d_load = self._delta(load, load_prev) + + # estado + l1 = load[0] if isinstance(load, list) and load else "?" + state = self._classify_state(cpu, mem, l1) + + # tabela + table = Table(title="🩺 Exame Clínico Evolutivo", show_lines=True) + table.add_column("Sinal Vital") + table.add_column("Atual", justify="right") + table.add_column("Δ", justify="center") + table.add_column("Anterior", justify="right") + + def fmt(v): + if isinstance(v, list): + return str(v) + return str(v) + + table.add_row("CPU (%)", fmt(cpu), fmt(d_cpu), fmt(cpu_prev)) + table.add_row("Memória (%)", fmt(mem), fmt(d_mem), fmt(mem_prev)) + table.add_row("Carga média (1/5/15)", fmt(load), "≈" if d_load == "?" 
else fmt(d_load), fmt(load_prev)) + console.print(table) + + payload = { + "schema": NEUROTRON_DIAG_SCHEMA, + "timestamp": _now_iso(), + "cpu_percent": cpu, + "mem_percent": mem, + "loadavg": load, + "state": state, + "env": { + "user": snap.get("env_user"), + "term": snap.get("env_term"), + }, + } + self._save_current(payload) + console.print(f"[green]✔ Histórico evolutivo atualizado em:[/green] \n{self.last_file}") + + # Atualiza telemetria contínua + try: + telemetry_file = Path(NEUROTRON_DATASET_PATH) / "telemetry.json" + telemetry_file.parent.mkdir(parents=True, exist_ok=True) + + telemetry = [] + if telemetry_file.exists(): + telemetry = json.loads(telemetry_file.read_text() or "[]") + + telemetry.append({ + "timestamp": payload["timestamp"], + "cpu": payload.get("cpu_percent"), + "mem": payload.get("mem_percent"), + "load": payload.get("loadavg"), + "state": payload.get("state"), + }) + + telemetry = telemetry[-128:] # manter últimas 128 amostras + telemetry_file.write_text(json.dumps(telemetry, indent=2)) + except Exception as e: + console.print(f"[yellow]⚠️ Falha ao atualizar telemetria:[/] {e}") + + + return state, payload diff --git a/src/_nfdos/kernel/neurotron/neurotron_core/cortex.py b/src/_nfdos/kernel/neurotron/neurotron_core/cortex.py new file mode 100644 index 0000000..8102e77 --- /dev/null +++ b/src/_nfdos/kernel/neurotron/neurotron_core/cortex.py @@ -0,0 +1,230 @@ +import json +import time +from collections import defaultdict, deque +from pathlib import Path +from time import sleep +from rich.console import Console + +from neuron import Neuron +from hippocampus import Hippocampus +from perception import Perception +from motor import Motor + +from .neurotron_config import ( + NEUROTRON_MODE, NEUROTRON_TICK, NEUROTRON_TICK_MIN, NEUROTRON_TICK_MAX, NEUROTRON_TICK_STEP, + NEUROTRON_DIAG_EVERY_TICKS, NEUROTRON_DATASET_PATH, + HEARTBEAT_ENABLED, HEARTBEAT_STYLE, NEUROTRON_THRESHOLDS, + TELEMETRY_MAXLEN, TELEMETRY_FLUSH_EVERY_TICKS, +) +from 
.autodiagnostic import AutoDiagnostic + + +class VitalSigns(Neuron): + name = "VitalSigns" + def observe(self) -> None: + snap = self.ctx.perception.snapshot() + self.publish("vitals", snap) + self.ctx.memory.remember("observe.vitals", snap) + + +class EchoAgent(Neuron): + name = "EchoAgent" + def think(self) -> None: + msg = self.consume("vitals") + if msg: + self.publish("actions", {"action": "echo", "text": f"CPU {msg.get('cpu_percent', '?')}%"}) + + +class Cortex: + """ + Orquestrador: liga neurónios, bus de mensagens, memória, IO e ciclo cognitivo. + Agora com Telemetria Contínua (V5): heartbeat, microalertas e flush periódico. + """ + def __init__(self, runtime_dir, log_dir, tick_seconds=NEUROTRON_TICK): + self.runtime_dir = runtime_dir + self.log_dir = log_dir + self.tick = float(tick_seconds) + self.mode = NEUROTRON_MODE + self._tick_count = 0 + self.diagnostic = AutoDiagnostic(runtime_dir, log_dir) + + self.console = Console() + self.memory = Hippocampus(log_dir=log_dir) + self.perception = Perception() + self.motor = Motor() + + # Message bus simples: channels → deque + self.bus = defaultdict(lambda: deque(maxlen=32)) + + # Telemetria em memória (curto prazo) + self.telemetry = deque(maxlen=TELEMETRY_MAXLEN) + + # Regista neurónios (podes adicionar mais à medida) + self.neurons: list[Neuron] = [ + VitalSigns(self), + EchoAgent(self), + ] + + self._booted = False + + # Caminho para gravar a telemetria + self.telemetry_path = Path(NEUROTRON_DATASET_PATH) / "telemetry.json" + self.telemetry_path.parent.mkdir(parents=True, exist_ok=True) + + # ——— ciclo de vida ——— + def boot(self) -> None: + if self._booted: + return + self.console.print("[bold cyan]🧠 Neurotron[/] — boot") + self.memory.remember("boot", {"version": "0.1", "tick": self.tick}) + self._booted = True + state, _ = self.diagnostic.run_exam() + self._apply_homeostasis(state) + + def _apply_homeostasis(self, state): + if state == "CRITICAL": + self.mode = "diagnostic" + self.tick = 
min(NEUROTRON_TICK_MAX, self.tick + NEUROTRON_TICK_STEP) + elif state == "ALERT": + self.tick = min(NEUROTRON_TICK_MAX, self.tick + NEUROTRON_TICK_STEP / 2) + elif state == "STABLE": + self.tick = max(NEUROTRON_TICK_MIN, self.tick - NEUROTRON_TICK_STEP / 2) + # UNKNOWN → não mexe + + def shutdown(self, reason: str = ""): + self.console.print(f"[yellow]shutdown:[/] {reason}") + self.memory.remember("shutdown", {"reason": reason}) + + def fatal(self, e: Exception): + self.console.print(f"[red]fatal:[/] {e!r}") + self.memory.remember("fatal", {"error": repr(e)}) + print(f"fatal: {repr(e)}") + raise + + # ——— loop ——— + def observe(self) -> None: + for n in self.neurons: + n.observe() + + def think(self) -> None: + for n in self.neurons: + n.think() + + def act(self) -> None: + # Consumir ações agregadas e executar + action = self.bus_consume("actions") + if action and action.get("action") == "echo": + res = self.motor.run("echo", [action.get("text", "")]) + self.memory.remember("act.echo", res) + if res.get("stdout"): + self.console.print(f"[green]{res['stdout'].strip()}[/]") + + def rest(self): + # Heartbeat e microalertas antes de dormir + if HEARTBEAT_ENABLED: + self._heartbeat_and_telemetry() + + # Pausa regulada + sleep(self.tick) + + # Contador e rotinas periódicas + self._tick_count += 1 + + if self._tick_count % NEUROTRON_DIAG_EVERY_TICKS == 0: + state, _ = self.diagnostic.run_exam() + self._apply_homeostasis(state) + + if self._tick_count % TELEMETRY_FLUSH_EVERY_TICKS == 0: + self._flush_telemetry() + + # ——— telemetria/alertas ——— + def _heartbeat_and_telemetry(self): + snap = self.perception.snapshot() + cpu = snap.get("cpu_percent", "?") + mem = (snap.get("mem") or {}).get("percent", "?") + load = snap.get("loadavg") or [] + + # Adiciona ao buffer de telemetria + self.telemetry.append({ + "ts": time.time(), + "cpu": cpu, + "mem": mem, + "load": load, + "tick": self.tick, + }) + + # Microalertas com base nos limiares + self._evaluate_microalerts(cpu, mem, 
load) + + # Heartbeat visual + color = self._color_for_levels(cpu, mem, load) + if HEARTBEAT_STYLE == "compact": + self.console.print(f"[bold {color}]💓[/] CPU: {cpu}% | MEM: {mem}% | TICK: {self.tick:.2f}s") + else: + self.console.print( + f"[bold {color}]💓 [Heartbeat][/bold {color}] " + f"CPU: {cpu}% | MEM: {mem}% | LOAD: {load} | TICK: {self.tick:.2f}s | MODE: {self.mode}" + ) + + def _evaluate_microalerts(self, cpu, mem, load): + alerts = [] + # Normaliza + load1 = load[0] if (isinstance(load, (list, tuple)) and load) else None + + try: + if isinstance(cpu, (int, float)) and cpu >= NEUROTRON_THRESHOLDS["cpu_high"]: + alerts.append(("cpu", cpu)) + if isinstance(mem, (int, float)) and mem >= NEUROTRON_THRESHOLDS["mem_high"]: + alerts.append(("mem", mem)) + if isinstance(load1, (int, float)) and load1 >= NEUROTRON_THRESHOLDS["load1_high"]: + alerts.append(("load1", load1)) + except KeyError: + pass # thresholds incompletos → sem microalertas + + if not alerts: + return + + for (metric, value) in alerts: + self.console.print(f"[yellow]⚠️ Microalerta:[/] {metric.upper()} {value} — ajustando homeostase (tick +{NEUROTRON_TICK_STEP:.2f}s)") + # Ajuste simples de segurança + self.tick = min(NEUROTRON_TICK_MAX, self.tick + NEUROTRON_TICK_STEP) + + self.memory.remember("microalert", { + "ts": time.time(), + "alerts": alerts, + "new_tick": self.tick, + }) + + def _color_for_levels(self, cpu, mem, load): + # Heurística simples de cor + try: + load1 = load[0] if (isinstance(load, (list, tuple)) and load) else 0.0 + high = ( + (isinstance(cpu, (int, float)) and cpu >= NEUROTRON_THRESHOLDS["cpu_high"]) or + (isinstance(mem, (int, float)) and mem >= NEUROTRON_THRESHOLDS["mem_high"]) or + (isinstance(load1, (int, float)) and load1 >= NEUROTRON_THRESHOLDS["load1_high"]) + ) + if high: + return "yellow" + except Exception: + pass + return "green" + + def _flush_telemetry(self): + # Grava o buffer de telemetria em JSON (mantendo histórico curto) + try: + data = list(self.telemetry) 
+ with self.telemetry_path.open("w") as f: + json.dump(data, f) + self.memory.remember("telemetry.flush", {"count": len(data), "path": str(self.telemetry_path)}) + except Exception as e: + self.console.print(f"[red]✖ Falha ao gravar telemetria:[/] {e!r}") + self.memory.remember("telemetry.error", {"error": repr(e)}) + + # ——— bus ——— + def bus_publish(self, channel: str, payload: dict) -> None: + self.bus[channel].append(payload) + + def bus_consume(self, channel: str) -> dict | None: + q = self.bus[channel] + return q.popleft() if q else None \ No newline at end of file diff --git a/src/_nfdos/kernel/neurotron/neurotron_core/disk_init.py b/src/_nfdos/kernel/neurotron/neurotron_core/disk_init.py new file mode 100644 index 0000000..13683ab --- /dev/null +++ b/src/_nfdos/kernel/neurotron/neurotron_core/disk_init.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python3 +""" +💾 Módulo de Inicialização de Disco — Neurotron V0.1 (atualizado) +Detecta, avalia, prepara e monta o disco persistente do NFDOS. +- Não formata discos que já contenham um filesystem conhecido, a menos que forçado. 
+- Forçar formatação: + * EXPORT: export NFDOS_FORCE_FORMAT=1 (no ambiente do initramfs, se aplicável) + * Kernel cmdline: adicionar `nfdos_force_format=1` ao -append do QEMU +""" + +import os +import subprocess +from pathlib import Path +from rich.console import Console +if __name__ == "__main__" and __package__ is None: + import sys + from pathlib import Path + sys.path.append(str(Path(__file__).resolve().parents[1])) + __package__ = "neurotron_core" + +from .neurotron_config import ( + MOUNT_POINT, DISK_CANDIDATES +) + +console = Console() + +def run(cmd: list[str]) -> bool: + """Executa comando silenciosamente (retorna True se OK).""" + try: + subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) + return True + except (subprocess.CalledProcessError, FileNotFoundError): + return False + + +def detect_disk() -> str | None: + """Procura por um dispositivo de disco válido (por ordem em DISK_CANDIDATES).""" + for dev in DISK_CANDIDATES: + p = Path(dev) + if p.exists(): + console.print(f"[cyan]🔍 Detetado disco:[/] {dev}") + return dev + console.print("[yellow]⚠️ Nenhum disco detectado.[/yellow]") + return None + + +def blkid_check(device: str) -> str | None: + """Tenta obter tipo com blkid (se disponível).""" + try: + out = subprocess.run(["blkid", device], stdout=subprocess.PIPE, text=True, check=False) + return out.stdout.strip() if out.stdout else None + except FileNotFoundError: + return None + + +def read_sig(device: str, size: int = 2048) -> bytes | None: + """Lê os primeiros `size` bytes do device (se possível).""" + try: + with open(device, "rb") as f: + return f.read(size) + except Exception: + return None + + +def detect_fs_by_magic(device: str) -> str | None: + """ + Detecta assinaturas simples: + - ext4 superblock magic (0xEF53) @ offset 1024 + 56 = 1080 + - NTFS -> 'NTFS ' @ offset 3 + - FAT32 -> 'FAT32' nos offsets típicos do boot sector + - MBR partition table signature 0x55AA @ offset 510-511 + Retorna string com o 
sistema ou None. + """ + buf = read_sig(device, size=4096) + if not buf: + return None + + # MBR signature + if len(buf) >= 512 and buf[510:512] == b'\x55\xAA': + # detecta tabela de partições existente (MBR) + return "mbr-partition-table" + + # ext magic at 1024+56 = 1080 + if len(buf) >= 1082 and buf[1080:1082] == b'\x53\xEF': + return "ext (superblock)" + + # NTFS signature at offset 3 (ASCII "NTFS ") + if len(buf) >= 11 and buf[3:11] == b'NTFS ': + return "ntfs" + + # FAT32 signature at offset 82 or boot sector strings containing FAT + if b"FAT32" in buf or b"FAT16" in buf or b"FAT12" in buf: + return "fat" + + return None + + +def parse_cmdline_flag() -> bool: + """Lê /proc/cmdline para a flag nfdos_force_format=1""" + try: + with open("/proc/cmdline", "r") as f: + cmd = f.read() + return "nfdos_force_format=1" in cmd.split() + except Exception: + return False + +def which(prog: str) -> str | None: + for p in os.environ.get("PATH", "/sbin:/bin:/usr/sbin:/usr/bin").split(":"): + cand = Path(p) / prog + if cand.exists() and os.access(cand, os.X_OK): + return str(cand) + return None + +def format_ext4(device: str, label: str = "NFDOS_DATA") -> bool: + """Formata o dispositivo com ext4, recolhendo logs de erro detalhados (BusyBox-safe).""" + mke2fs = which("mke2fs") + mkfs_ext4 = which("mkfs.ext4") + mkfs = which("mkfs") + + candidates = [] + + if mkfs_ext4: + candidates.append(([mkfs_ext4, "-F", "-L", label, device], "mkfs.ext4")) + if mke2fs: + # o BusyBox mke2fs não aceita '-t', por isso ajustaremos dentro do loop + candidates.append(([mke2fs, "-F", "-t", "ext4", "-L", label, device], "mke2fs")) + if mkfs: + candidates.append(([mkfs, "-t", "ext4", "-F", "-L", label, device], "mkfs")) + + if not candidates: + console.print("[red]❌ Nenhum utilitário mkfs disponível no initramfs![/red]") + return False + + for cmd, name in candidates: + console.print(f"[yellow]⚙️ Formatando {device} com {name}...[/yellow]") + + # 👉 se for o BusyBox mke2fs, removemos o argumento -t 
+ if name == "mke2fs": + cmd = [c for c in cmd if c != "-t" and c != "ext4"] + + try: + result = subprocess.run( + cmd, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=True, + ) + if result.stdout: + console.print(result.stdout.strip()) + console.print(f"[green]✔ Formatação concluída com {name}.[/green]") + return True + + except subprocess.CalledProcessError as e: + console.print(f"[red]❌ {name} falhou (código {e.returncode}).[/red]") + if e.stdout: + console.print(f"[cyan]📜 STDOUT:[/cyan]\n{e.stdout.strip()}") + if e.stderr: + console.print(f"[magenta]⚠️ STDERR:[/magenta]\n{e.stderr.strip()}") + + console.print("[red]❌ Nenhum método de formatação teve sucesso.[/red]") + console.print("[cyan]🧠 Sugestão:[/] verifique se o kernel suporta EXT4 e se o BusyBox inclui mke2fs.") + return False + + +def ensure_fs(device: str) -> bool: + """ + Verifica se existe sistema de ficheiros. + Se não existir e houver confirmação/flag, formata ext4 (ou fallback via mke2fs). + """ + # 1️⃣ tentativa rápida com blkid + info = blkid_check(device) + if info: + console.print(f"[green]🧠 Disco já formatado (blkid):[/] {info}") + return True + + # 2️⃣ fallback por leituras de assinatura + sig = detect_fs_by_magic(device) + if sig: + console.print(f"[yellow]⚠ Assinatura detectada no disco:[/] {sig}") + console.print("[red]❗ O disco contém dados ou partições existentes. 
Abortando formatação.[/red]") + return False + + # 3️⃣ se nada detectado — disco virgem + forced_env = os.environ.get("NFDOS_FORCE_FORMAT") == "1" + forced_cmd = parse_cmdline_flag() + + if not (forced_env or forced_cmd): + console.print("[yellow]⚠ Disco parece virgem, mas não há confirmação para formatar.[/yellow]") + console.print("Use `nfdos_force_format=1` no kernel cmdline ou export NFDOS_FORCE_FORMAT=1") + console.print("para permitir formatação automática.") + return False + + # 4️⃣ tentar formatação + console.print(f"[yellow]⚙️ Forçando formatação de {device} como ext4 (FLAG DETETADA)...[/yellow]") + + ok = format_ext4(device) + if ok: + console.print("[green]✔ Formatação concluída com sucesso.[/green]") + return True + + # 5️⃣ se nada funcionou + console.print("[red]❌ Falha na formatação.[/red]") + console.print("[cyan]🧠 Sugestão:[/] verifique se o kernel inclui suporte para EXT4 ou se o mkfs/mke2fs está embutido no BusyBox.") + return False + + +def mount_disk(device: str) -> bool: + """Monta o disco no ponto esperado (retorna True se OK).""" + os.makedirs(MOUNT_POINT, exist_ok=True) + return run(["mount", device, MOUNT_POINT]) + + +def debug_env(): + """Mostra informações úteis quando nenhum disco é detectado (ou para debug).""" + console.print("[yellow]🩻 DEBUG: listando /dev/* e últimas mensagens do kernel[/yellow]") + devs = sorted(Path("/dev").glob("*")) + console.print("📂 Dispositivos disponíveis:", ", ".join([d.name for d in devs if d.is_char_device() or d.is_block_device()])) + os.system("dmesg | tail -n 20 || echo '(dmesg não disponível)'") + console.print("[yellow]───────────────────────────────[/yellow]") + os.system("echo '--- /proc/partitions ---'; cat /proc/partitions || true") + os.system("echo '--- dmesg | grep -i virtio ---'; dmesg | grep -i virtio || true") + console.print("[yellow]───────────────────────────────[/yellow]") + + +def initialize_persistence(): + """Fluxo completo de inicialização do hipocampo físico.""" + device = 
detect_disk() + if not device: + debug_env() + console.print("[red]❌ Nenhum disco físico encontrado — usando modo RAM.[/red]") + return False + + if not ensure_fs(device): + console.print("[red]❌ Preparação do sistema de ficheiros foi interrompida.[/red]") + return False + + if not mount_disk(device): + console.print("[red]❌ Falha ao montar disco.[/red]") + return False + + console.print(f"[green]✔ Disco montado em:[/] {MOUNT_POINT}") + + telemetry_file = Path("/opt/kernel/neurotron/data/telemetry.json") + telemetry_file.parent.mkdir(parents=True, exist_ok=True) + if not telemetry_file.exists(): + telemetry_file.write_text("[]") + + + for d in ["data", "logs", "dna"]: + Path(MOUNT_POINT, d).mkdir(parents=True, exist_ok=True) + Path(MOUNT_POINT, "DNA_ID").write_text("NEUROTRON_HIPOCAMPUS_V1\n") + + console.print("[cyan]👉 Hipocampo físico inicializado com sucesso.[/cyan]") + return True + + +if __name__ == "__main__": + initialize_persistence() diff --git a/src/_nfdos/kernel/neurotron/neurotron_core/hippocampus.py b/src/_nfdos/kernel/neurotron/neurotron_core/hippocampus.py new file mode 100644 index 0000000..073bb5c --- /dev/null +++ b/src/_nfdos/kernel/neurotron/neurotron_core/hippocampus.py @@ -0,0 +1,34 @@ +from pathlib import Path +from datetime import datetime + +try: + import orjson as json +except Exception: # fallback leve + import json # type: ignore + +class Hippocampus: + """ + Memória contextual simples (JSON Lines): append-only. + Guarda perceções, decisões e ações para replays futuros. 
+ """ + def __init__(self, log_dir: Path): + self.log_dir = log_dir + self.events_file = log_dir / "events.jsonl" + + def remember(self, kind: str, data: dict) -> None: + rec = { + "ts": datetime.utcnow().isoformat() + "Z", + "kind": kind, + "data": data, + } + try: + if "orjson" in json.__name__: + blob = json.dumps(rec) + else: + blob = json.dumps(rec) # type: ignore + with self.events_file.open("ab") as f: + f.write(blob if isinstance(blob, bytes) else blob.encode("utf-8")) + f.write(b"\n") + except Exception: + # evitar crash por IO em early boot + pass diff --git a/src/_nfdos/kernel/neurotron/neurotron_core/motor.py b/src/_nfdos/kernel/neurotron/neurotron_core/motor.py new file mode 100644 index 0000000..b6a466a --- /dev/null +++ b/src/_nfdos/kernel/neurotron/neurotron_core/motor.py @@ -0,0 +1,27 @@ +import subprocess + +class Motor: + """ + Ator do sistema: executa comandos controlados (whitelist). + Mantém-se minimal até termos política de segurança mais rica. + """ + SAFE_CMDS = { + "echo": ["echo"], + "sh": ["/bin/sh"], # shell interativo (init) + } + + def run(self, cmd: str, args: list[str] | None = None) -> dict: + prog = self.SAFE_CMDS.get(cmd) + if not prog: + return {"ok": False, "error": f"cmd '{cmd}' não permitido"} + try: + full = prog + (args or []) + res = subprocess.run(full, capture_output=True, text=True) + return { + "ok": res.returncode == 0, + "code": res.returncode, + "stdout": res.stdout, + "stderr": res.stderr, + } + except Exception as e: + return {"ok": False, "error": str(e)} diff --git a/src/_nfdos/kernel/neurotron/neurotron_core/neuron.py b/src/_nfdos/kernel/neurotron/neurotron_core/neuron.py new file mode 100644 index 0000000..972102c --- /dev/null +++ b/src/_nfdos/kernel/neurotron/neurotron_core/neuron.py @@ -0,0 +1,30 @@ +from typing import Any, Dict + +class Neuron: + """ + Classe-base de um “neurónio-agente”. + Cada neurónio pode observar/agir e trocar mensagens via o bus do Cortex. 
+ """ + name = "Neuron" + + def __init__(self, ctx: "Cortex"): + self.ctx = ctx + + def observe(self) -> None: + """Ler estado do mundo (sensores, /proc, eventos).""" + return + + def think(self) -> None: + """Processar/planejar usando o estado atual.""" + return + + def act(self) -> None: + """Executar uma ação (opcional).""" + return + + # Utilitários + def publish(self, channel: str, payload: Dict[str, Any]) -> None: + self.ctx.bus_publish(channel, payload) + + def consume(self, channel: str) -> Dict[str, Any] | None: + return self.ctx.bus_consume(channel) diff --git a/src/_nfdos/kernel/neurotron/neurotron_core/neurotron_config.py b/src/_nfdos/kernel/neurotron/neurotron_core/neurotron_config.py new file mode 100644 index 0000000..660d925 --- /dev/null +++ b/src/_nfdos/kernel/neurotron/neurotron_core/neurotron_config.py @@ -0,0 +1,124 @@ +""" +🧠 neurotron_config.py +NFDOS — Núcleo de parâmetros vitais do Neurotron +------------------------------------------------ +Este ficheiro centraliza todos os ajustes simbólicos e técnicos +do ciclo cognitivo do Neurotron. + +Versão: 0.1 (Nascimento) +""" + +from pathlib import Path + +# ====================================== +# 🌐 Diretórios e Caminhos +# ====================================== + +BASE_DIR = Path(__file__).resolve().parents[1] # /opt/kernel/neurotron/ +CORE_DIR = BASE_DIR / "neurotron_core" +LOG_DIR = Path("/var/log/neurotron") # pode não existir ainda no rootfs +RUNTIME_DIR = Path("/var/run/neurotron") +MOUNT_POINT = "/var/neurotron" +DISK_CANDIDATES = ["/dev/vda", "/dev/vdb", "/dev/sda", "/dev/hda"] + +# ====================================== +# ⚙️ Parâmetros Cognitivos Principais +# ====================================== + +# Tempo entre ciclos cognitivos (em segundos) +NEUROTRON_TICK = 1.0 + +# Verbosidade dos logs (0 = silêncio, 1 = normal, 2 = debug) +NEUROTRON_VERBOSITY = 1 + +# Modo de operação +# - diagnostic: executa verificações de integridade +# - learning: ativa ciclos adaptativos (tree, etc.) 
# - simulation: runs continuous observation behaviour
NEUROTRON_MODE = "diagnostic"

# Homeostasis limit (self-regulation):
# if CPU or memory usage exceeds this value (%), the system slows its rhythm
NEUROTRON_HOMEOSTASIS = 85.0

HOMEOSTASIS_CPU_WARN = 70.0    # %
HOMEOSTASIS_CPU_ALERT = 85.0   # %
HOMEOSTASIS_MEM_WARN = 75.0    # %
HOMEOSTASIS_MEM_ALERT = 90.0   # %
HOMEOSTASIS_LOAD_WARN = 1.5    # 1-min average (tune for your single/SMT core)
HOMEOSTASIS_LOAD_ALERT = 3.0

NEUROTRON_DIAG_EVERY_TICKS = 5  # re-evaluate vital signs every N cognitive cycles
NEUROTRON_TICK_MIN = 0.5        # floor for the adaptive tick, seconds
NEUROTRON_TICK_MAX = 3.0        # ceiling for the adaptive tick, seconds
NEUROTRON_TICK_STEP = 0.25      # adjustment granularity

# Entropy (seed) used to generate pseudo-random behaviours
NEUROTRON_SEED = 42

# Maximum size of the Hippocampus memory (in KB)
NEUROTRON_MEMORY_SIZE = 256  # defines when the system starts to "forget"

# ======================================
# 🧩 Subsystem parameters
# ======================================

# Cortex — decision core
CORTEX_MAX_THREADS = 1   # simultaneous reasoning threads
CORTEX_LOOP_DELAY = 0.1  # delay between internal cycles, seconds

# Hippocampus — memory
HIPPOCAMPUS_LOG_RETENTION = 100  # maximum number of logs kept
HIPPOCAMPUS_AUTOSAVE = True      # enable auto-save between cycles

# Motor — output / action
MOTOR_OUTPUT_DEVICE = "console"  # destination: console, log, cloud (future)
MOTOR_SHOW_SYMBOLS = True        # display symbols (🧠, ⚙️, etc.)
def show_config():
    """Print the current Neurotron configuration as indented JSON.

    Previously only ``NEUROTRON_*`` names were shown, although this
    module also defines CORTEX_, HIPPOCAMPUS_, MOTOR_, PERCEPTION_,
    HOMEOSTASIS_, HEARTBEAT_ and TELEMETRY_ parameters; those prefixes
    are now included as well (backward compatible — the NEUROTRON_*
    entries are still all printed).
    """
    import json
    prefixes = (
        "NEUROTRON_", "CORTEX_", "HIPPOCAMPUS_", "MOTOR_",
        "PERCEPTION_", "HOMEOSTASIS_", "HEARTBEAT_", "TELEMETRY_",
    )
    # str.startswith accepts a tuple, so one pass covers every prefix;
    # default=str keeps Path values serializable
    cfg = {k: v for k, v in globals().items() if k.startswith(prefixes)}
    print(json.dumps(cfg, indent=2, default=str))
class Perception:
    """Internal sensors backed by /proc (no psutil dependency).

    - CPU %: delta between two /proc/stat samples
    - memory %: /proc/meminfo (MemTotal vs MemAvailable)
    - load average: os.getloadavg() with a /proc/loadavg fallback
    Every probe degrades to the string "?" on failure instead of raising.
    """

    def _read_proc_stat(self):
        """Return the aggregate cpu counters from /proc/stat, or None."""
        try:
            with open("/proc/stat", "r") as fh:
                first = fh.readline()
            if not first.startswith("cpu "):
                return None
            # user nice system idle iowait irq softirq steal guest guest_nice
            fields = [int(tok) for tok in first.strip().split()[1:][:10]]
            if len(fields) < 10:
                return None  # malformed line — same outcome as a parse error
            keys = ("user", "nice", "system", "idle", "iowait",
                    "irq", "softirq", "steal", "guest", "guest_nice")
            return dict(zip(keys, fields))
        except Exception:
            return None

    def _cpu_percent(self, interval=0.05):
        """CPU usage %, measured over a tiny sampling window."""
        before = self._read_proc_stat()
        if not before:
            return "?"
        sleep(interval)  # micro sampling window
        after = self._read_proc_stat()
        if not after:
            return "?"
        idle_before = before["idle"] + before["iowait"]
        idle_after = after["idle"] + after["iowait"]
        # total time is simply the sum of all counters
        total_delta = sum(after.values()) - sum(before.values())
        idle_delta = idle_after - idle_before
        if total_delta <= 0:
            return "?"
        return round((total_delta - idle_delta) * 100.0 / total_delta, 1)

    def _mem_percent(self):
        """Used-memory %, derived from MemTotal and MemAvailable."""
        try:
            info = {}
            with open("/proc/meminfo", "r") as fh:
                for row in fh:
                    key, raw = row.split(":", 1)
                    info[key.strip()] = raw.strip()

            def as_kb(name):
                value = info.get(name)
                return None if value is None else float(value.split()[0])

            total = as_kb("MemTotal")
            avail = as_kb("MemAvailable")
            if not total or avail is None:
                return "?"
            return round((total - avail) * 100.0 / total, 1)
        except Exception:
            return "?"

    def _loadavg(self):
        """[1, 5, 15]-minute load averages, rounded to 2 decimals."""
        try:
            if hasattr(os, "getloadavg"):
                one, five, fifteen = os.getloadavg()
            else:
                with open("/proc/loadavg", "r") as fh:
                    tokens = fh.read().strip().split()
                one, five, fifteen = map(float, tokens[:3])
            return [round(one, 2), round(five, 2), round(fifteen, 2)]
        except Exception:
            return ["?", "?", "?"]

    def snapshot(self) -> dict:
        """One reading of every sensor, as a plain dict."""
        return {
            "env_user": os.environ.get("USER") or "root",
            "env_term": os.environ.get("TERM") or "unknown",
            "cpu_percent": self._cpu_percent(),
            "mem_percent": self._mem_percent(),
            "loadavg": self._loadavg(),
        }
DATASET = "/opt/kernel/neurotron/data/telemetry.json"
BAR_CHARS = "▁▂▃▄▅▆▇█"
SAMPLES = 24    # how many recent samples to display
REFRESH = 2.0   # seconds between dashboard refreshes


def mini_graph(values, width=24):
    """Render *values* as a sparkline of block characters.

    Non-numeric entries are filtered out up front; an empty or
    all-junk input yields a run of middle dots.  Scaling uses the
    min/max of the whole filtered series, while only the last
    *width* samples are drawn.

    Fix: the original re-checked ``isinstance`` inside the drawing
    loop, but the list had already been filtered, so that branch was
    unreachable dead code — removed.
    """
    vals = [v for v in values if isinstance(v, (int, float))]
    if not vals:
        return "·" * width
    lo, hi = min(vals), max(vals)
    span = (hi - lo) or 1.0  # avoid division by zero on a flat series
    top = len(BAR_CHARS) - 1
    return "".join(
        BAR_CHARS[int(round((v - lo) / span * top))]
        for v in vals[-width:]
    )
read_telemetry(path: str): + try: + data = json.loads(Path(path).read_text() or "[]") + return data[-SAMPLES:] + except Exception: + return [] + + +def render_panel(console, data): + if not data: + console.print("[yellow]Nenhum dado de telemetria disponível.[/yellow]") + return + + cpu = [d.get("cpu") for d in data if isinstance(d.get("cpu"), (int, float))] + mem = [d.get("mem") for d in data if isinstance(d.get("mem"), (int, float))] + load = [d.get("load")[0] for d in data if isinstance(d.get("load"), (list, tuple)) and isinstance(d.get("load")[0], (int, float))] + + table = Table(show_header=True, header_style="bold cyan") + table.add_column("Sinal Vital", justify="left") + table.add_column("Tendência", justify="left") + table.add_column("Média", justify="right") + + table.add_row("CPU (%)", mini_graph(cpu), f"{mean(cpu):.1f}%" if cpu else "?") + table.add_row("Memória (%)", mini_graph(mem), f"{mean(mem):.1f}%" if mem else "?") + table.add_row("Carga (1min)", mini_graph(load), f"{mean(load):.2f}" if load else "?") + + panel = Panel(table, title="🩺 TELEMETRIA RECENTE", border_style="green") + console.clear() + console.print(panel) + + +def main(): + console = Console() + console.print("[bold cyan]Neurotron Telemetry Tail — Iniciar Monitorização[/bold cyan]\n") + + while True: + if not Path(DATASET).exists(): + console.print(f"[yellow]A aguardar dados em {DATASET}...[/yellow]") + time.sleep(REFRESH) + continue + + data = read_telemetry(DATASET) + if not data: + console.print("[yellow]Nenhum dado de telemetria disponível.[/yellow]") + else: + render_panel(console, data) + time.sleep(REFRESH) + + +if __name__ == "__main__": + main() diff --git a/src/_nfdos/kernel/neurotron/neurotron_main.py b/src/_nfdos/kernel/neurotron/neurotron_main.py new file mode 100644 index 0000000..7f2f6cf --- /dev/null +++ b/src/_nfdos/kernel/neurotron/neurotron_main.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +""" +Neurotron — ponto de entrada do “cérebro” do NFDOS. 
def detect_persistent_mount() -> bool:
    """Return True when the physical hippocampus is mounted at /var/neurotron.

    Tries ``os.path.ismount`` first; in early boot (where the mount
    point may exist but not register as a mount) falls back to
    scanning /proc/mounts.  Never raises: any probe failure prints a
    warning and reports False.
    """
    mount_point = Path("/var/neurotron")
    try:
        if mount_point.exists() and os.path.ismount(mount_point):
            console.print(f"[green]💾 Hipocampo físico montado:[/] {mount_point}")
            return True
        # fallback: check /proc/mounts in early boot.
        # Consistency fix: derive the needle from mount_point instead of
        # repeating the path literal (they could silently diverge).
        needle = f" {mount_point} "
        with open("/proc/mounts") as f:
            for line in f:
                if needle in line:
                    console.print(f"[green]💾 Hipocampo físico montado (via /proc):[/] {mount_point}")
                    return True
    except Exception as e:
        console.print(f"[yellow]⚠ Falha ao verificar montagem persistente:[/] {e}")
    return False
def main():
    """Entry point: pick a storage mode, boot the Cortex, run the loop.

    Runs forever until SIGINT/SystemExit; any other exception is
    routed to cortex.fatal().
    """
    # choose persistent storage when the hippocampus disk is mounted,
    # otherwise fall back to volatile tmpfs paths
    persistent_mode = detect_persistent_mount()
    if persistent_mode:
        os.environ["NEUROTRON_MODE"] = "persistent"
        os.environ["NEUROTRON_RUNTIME"] = "/var/neurotron/data"
        os.environ["NEUROTRON_LOG"] = "/var/neurotron/logs"
    else:
        os.environ["NEUROTRON_MODE"] = "volatile"
        os.environ["NEUROTRON_RUNTIME"] = "/tmp/neurotron_data"
        os.environ["NEUROTRON_LOG"] = "/tmp/neurotron_logs"

    # make sure both directories exist before the Cortex touches them
    runtime_dir = Path(os.environ["NEUROTRON_RUNTIME"])
    log_dir = Path(os.environ["NEUROTRON_LOG"])
    runtime_dir.mkdir(parents=True, exist_ok=True)
    log_dir.mkdir(parents=True, exist_ok=True)

    mode = os.environ["NEUROTRON_MODE"]
    console.print(f"[cyan]🌍 Modo atual do Neurotron:[/] [bold]{mode.upper()}[/]")

    # initialize the Cortex (decision core)
    cortex = Cortex(runtime_dir=runtime_dir, log_dir=log_dir, tick_seconds=NEUROTRON_TICK)

    try:
        cortex.boot()
        # NOTE(review): reaches into the private _load_previous() of the
        # diagnostic subsystem — presumably returns the last saved state
        # dict; confirm against AutoDiagnostic.
        state = cortex.diagnostic._load_previous().get("state", "?")
        console.print(f"[cyan]🩺 Estado inicial:[/] {state}\n")
        console.print("[green]👉 Neurotron inicializado com sucesso.[/]\n")
        console.print("[green]✔ [Mensagem Simbolica] Boot OK[/]\n")

        console.print("[green]✔ Iniciando ciclo cognitivo.[/]\n")
        # cognitive loop: observe -> think -> act -> rest, forever
        while True:
            cortex.observe()
            cortex.think()
            cortex.act()
            cortex.rest()

    except KeyboardInterrupt:
        # operator pressed Ctrl-C: orderly shutdown
        console.print("[yellow]⚠ Interrompido pelo utilizador (SIGINT)[/]")
        cortex.shutdown(reason="SIGINT")

    except SystemExit:
        cortex.shutdown(reason="SystemExit")

    except Exception as e:
        # anything unexpected: report and let the Cortex record the failure
        console.print(f"[red]💥 Exceção não tratada:[/] {e}")
        cortex.fatal(e)
#ifndef VGA_H
#define VGA_H

/*
 * Freestanding text-mode VGA console helpers.
 *
 * Fix: the two #include directives had lost their targets;
 * uint8_t/uint16_t come from <stdint.h> and size_t (used by the
 * cursor bookkeeping in vga.c) from <stddef.h>.
 */
#include <stdint.h>
#include <stddef.h>

#define VGA_WIDTH 80
#define VGA_HEIGHT 25
/* VGA text buffer at physical 0xB8000: one uint16_t per screen cell */
#define VGA_MEM ((volatile uint16_t*)0xB8000)

/* Pack a character and a colour attribute into one VGA cell value
 * (low byte = character, high byte = attribute). */
static inline uint16_t vga_entry(char c, uint8_t color) {
    return (uint16_t)c | (uint16_t)color << 8;
}

void vga_clear(uint8_t color);                    /* blank the screen, reset cursor */
void vga_write_str(const char* s, uint8_t color); /* write a NUL-terminated string */

#endif
diff --git a/src/_nfdos/python b/src/_nfdos/python new file mode 100755 index 0000000..74e5744 Binary files /dev/null and b/src/_nfdos/python differ diff --git a/src/_nfdos/rootfs/bin/arch b/src/_nfdos/rootfs/bin/arch new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/arch @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/ash b/src/_nfdos/rootfs/bin/ash new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/ash @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/base32 b/src/_nfdos/rootfs/bin/base32 new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/base32 @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/base64 b/src/_nfdos/rootfs/bin/base64 new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/base64 @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/busybox b/src/_nfdos/rootfs/bin/busybox new file mode 100755 index 0000000..bf99776 Binary files /dev/null and b/src/_nfdos/rootfs/bin/busybox differ diff --git a/src/_nfdos/rootfs/bin/cat b/src/_nfdos/rootfs/bin/cat new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/cat @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/chattr b/src/_nfdos/rootfs/bin/chattr new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/chattr @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/chgrp b/src/_nfdos/rootfs/bin/chgrp new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/chgrp @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/chmod b/src/_nfdos/rootfs/bin/chmod new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/chmod @@ -0,0 +1 
@@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/chown b/src/_nfdos/rootfs/bin/chown new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/chown @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/conspy b/src/_nfdos/rootfs/bin/conspy new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/conspy @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/cp b/src/_nfdos/rootfs/bin/cp new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/cp @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/cpio b/src/_nfdos/rootfs/bin/cpio new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/cpio @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/cttyhack b/src/_nfdos/rootfs/bin/cttyhack new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/cttyhack @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/date b/src/_nfdos/rootfs/bin/date new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/date @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/dd b/src/_nfdos/rootfs/bin/dd new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/dd @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/df b/src/_nfdos/rootfs/bin/df new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/df @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/dmesg b/src/_nfdos/rootfs/bin/dmesg new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/dmesg @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/dnsdomainname 
b/src/_nfdos/rootfs/bin/dnsdomainname new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/dnsdomainname @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/dumpkmap b/src/_nfdos/rootfs/bin/dumpkmap new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/dumpkmap @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/echo b/src/_nfdos/rootfs/bin/echo new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/echo @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/ed b/src/_nfdos/rootfs/bin/ed new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/ed @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/egrep b/src/_nfdos/rootfs/bin/egrep new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/egrep @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/false b/src/_nfdos/rootfs/bin/false new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/false @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/fatattr b/src/_nfdos/rootfs/bin/fatattr new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/fatattr @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/fdflush b/src/_nfdos/rootfs/bin/fdflush new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/fdflush @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/fgrep b/src/_nfdos/rootfs/bin/fgrep new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/fgrep @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/fsync b/src/_nfdos/rootfs/bin/fsync new file mode 
120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/fsync @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/getopt b/src/_nfdos/rootfs/bin/getopt new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/getopt @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/grep b/src/_nfdos/rootfs/bin/grep new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/grep @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/gunzip b/src/_nfdos/rootfs/bin/gunzip new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/gunzip @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/gzip b/src/_nfdos/rootfs/bin/gzip new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/gzip @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/hostname b/src/_nfdos/rootfs/bin/hostname new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/hostname @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/hush b/src/_nfdos/rootfs/bin/hush new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/hush @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/ionice b/src/_nfdos/rootfs/bin/ionice new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/ionice @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/iostat b/src/_nfdos/rootfs/bin/iostat new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/iostat @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/ipcalc b/src/_nfdos/rootfs/bin/ipcalc new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ 
b/src/_nfdos/rootfs/bin/ipcalc @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/kbd_mode b/src/_nfdos/rootfs/bin/kbd_mode new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/kbd_mode @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/kill b/src/_nfdos/rootfs/bin/kill new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/kill @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/link b/src/_nfdos/rootfs/bin/link new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/link @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/linux32 b/src/_nfdos/rootfs/bin/linux32 new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/linux32 @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/linux64 b/src/_nfdos/rootfs/bin/linux64 new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/linux64 @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/ln b/src/_nfdos/rootfs/bin/ln new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/ln @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/login b/src/_nfdos/rootfs/bin/login new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/login @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/ls b/src/_nfdos/rootfs/bin/ls new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/ls @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/lsattr b/src/_nfdos/rootfs/bin/lsattr new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/lsattr @@ -0,0 +1 @@ +busybox \ No newline at end 
of file diff --git a/src/_nfdos/rootfs/bin/lzop b/src/_nfdos/rootfs/bin/lzop new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/lzop @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/makemime b/src/_nfdos/rootfs/bin/makemime new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/makemime @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/mkdir b/src/_nfdos/rootfs/bin/mkdir new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/mkdir @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/mknod b/src/_nfdos/rootfs/bin/mknod new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/mknod @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/mktemp b/src/_nfdos/rootfs/bin/mktemp new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/mktemp @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/more b/src/_nfdos/rootfs/bin/more new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/more @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/mount b/src/_nfdos/rootfs/bin/mount new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/mount @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/mountpoint b/src/_nfdos/rootfs/bin/mountpoint new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/mountpoint @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/mpstat b/src/_nfdos/rootfs/bin/mpstat new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/mpstat @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/mt 
b/src/_nfdos/rootfs/bin/mt new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/mt @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/mv b/src/_nfdos/rootfs/bin/mv new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/mv @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/netstat b/src/_nfdos/rootfs/bin/netstat new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/netstat @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/nice b/src/_nfdos/rootfs/bin/nice new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/nice @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/pidof b/src/_nfdos/rootfs/bin/pidof new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/pidof @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/ping b/src/_nfdos/rootfs/bin/ping new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/ping @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/ping6 b/src/_nfdos/rootfs/bin/ping6 new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/ping6 @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/pipe_progress b/src/_nfdos/rootfs/bin/pipe_progress new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/pipe_progress @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/printenv b/src/_nfdos/rootfs/bin/printenv new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/printenv @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/ps b/src/_nfdos/rootfs/bin/ps new file mode 120000 index 
0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/ps @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/pwd b/src/_nfdos/rootfs/bin/pwd new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/pwd @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/reformime b/src/_nfdos/rootfs/bin/reformime new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/reformime @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/resume b/src/_nfdos/rootfs/bin/resume new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/resume @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/rev b/src/_nfdos/rootfs/bin/rev new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/rev @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/rm b/src/_nfdos/rootfs/bin/rm new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/rm @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/rmdir b/src/_nfdos/rootfs/bin/rmdir new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/rmdir @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/rpm b/src/_nfdos/rootfs/bin/rpm new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/rpm @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/run-parts b/src/_nfdos/rootfs/bin/run-parts new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/run-parts @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/scriptreplay b/src/_nfdos/rootfs/bin/scriptreplay new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ 
b/src/_nfdos/rootfs/bin/scriptreplay @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/sed b/src/_nfdos/rootfs/bin/sed new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/sed @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/setarch b/src/_nfdos/rootfs/bin/setarch new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/setarch @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/setpriv b/src/_nfdos/rootfs/bin/setpriv new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/setpriv @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/setserial b/src/_nfdos/rootfs/bin/setserial new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/setserial @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/sh b/src/_nfdos/rootfs/bin/sh new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/sh @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/sleep b/src/_nfdos/rootfs/bin/sleep new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/sleep @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/stat b/src/_nfdos/rootfs/bin/stat new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/stat @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/stty b/src/_nfdos/rootfs/bin/stty new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/stty @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/su b/src/_nfdos/rootfs/bin/su new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/su @@ -0,0 +1 @@ +busybox \ No newline at end 
of file diff --git a/src/_nfdos/rootfs/bin/sync b/src/_nfdos/rootfs/bin/sync new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/sync @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/tar b/src/_nfdos/rootfs/bin/tar new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/tar @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/touch b/src/_nfdos/rootfs/bin/touch new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/touch @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/true b/src/_nfdos/rootfs/bin/true new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/true @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/umount b/src/_nfdos/rootfs/bin/umount new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/umount @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/uname b/src/_nfdos/rootfs/bin/uname new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/uname @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/usleep b/src/_nfdos/rootfs/bin/usleep new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/usleep @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/vi b/src/_nfdos/rootfs/bin/vi new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/vi @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/watch b/src/_nfdos/rootfs/bin/watch new file mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/watch @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/bin/zcat b/src/_nfdos/rootfs/bin/zcat new file 
mode 120000 index 0000000..c3fa810 --- /dev/null +++ b/src/_nfdos/rootfs/bin/zcat @@ -0,0 +1 @@ +busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/init b/src/_nfdos/rootfs/init new file mode 100755 index 0000000..5f05f9e --- /dev/null +++ b/src/_nfdos/rootfs/init @@ -0,0 +1,26 @@ +#!/bin/sh + +mount -t proc proc /proc +mount -t sysfs sys /sys +mount -t devtmpfs devtmpfs /dev + +if [ -f /opt/kernel/neurotron/neurotron_main.py ]; then + export PYTHONHOME=/usr + export PYTHONPATH=/usr/lib/python3.13:/usr/lib/python3.13/site-packages + export PATH=/sbin:/bin:/usr/sbin:/usr/bin + + echo '👉 Inicializando hipocampo físico...' + /usr/bin/python3 /opt/kernel/neurotron/neurotron_core/disk_init.py + + echo '👉 Inicializando o Neurotron...' + /usr/bin/python3 /opt/kernel/neurotron/neurotron_main.py || echo "⚠️ Neurotron falhou" & + + sleep 5 + + echo '👉 Inicializando Painel de Telemetria do Neurotron...' + /usr/bin/python3 /opt/kernel/neurotron/neurotron_core/telemetry_tail.py +else + echo '⚙️ BusyBox ativo — Neurotron ausente.' 
def _now_iso():
    """Current UTC time as a timezone-aware ISO-8601 string."""
    return datetime.now(timezone.utc).isoformat()

class AutoDiagnostic:
    """
    Evolutionary self-diagnostic routine.

    Takes a vital-signs snapshot (CPU %, memory %, load average) via
    Perception, compares it with the previous exam, classifies the overall
    state (STABLE / ALERT / CRITICAL / UNKNOWN) and persists a capped
    rolling history plus a continuous telemetry file.
    """

    def __init__(self, runtime_dir: str, log_dir: str):
        self.runtime_dir = runtime_dir
        self.log_dir = log_dir
        # Dataset directory holds last_diagnostic.json and telemetry.json.
        self.data_dir = Path(NEUROTRON_DATASET_PATH)
        self.data_dir.mkdir(parents=True, exist_ok=True)
        self.last_file = self.data_dir / "last_diagnostic.json"

        self.perception = Perception()
        self.current = None
        self.previous = None

    def _load_previous(self):
        """Return the previous diagnostic payload, or None if missing/corrupt."""
        if not self.last_file.exists():
            return None
        try:
            with open(self.last_file, "r") as f:
                return json.load(f)
        except Exception:
            return None

    def _save_current(self, payload: dict):
        """Persist `payload`, folding the previous exam into a capped history."""
        history = []
        if self.last_file.exists():
            try:
                with open(self.last_file, "r") as f:
                    prev = json.load(f)
                history = prev.get("history", [])
                history.append({
                    "timestamp": prev.get("timestamp"),
                    "cpu_percent": prev.get("cpu_percent"),
                    "mem_percent": prev.get("mem_percent"),
                    "loadavg": prev.get("loadavg"),
                    "state": prev.get("state", "UNKNOWN"),
                })
                # Keep only the N most recent entries.
                history = history[-NEUROTRON_HISTORY_KEEP:]
            except Exception:
                history = []
        payload["history"] = history
        with open(self.last_file, "w") as f:
            json.dump(payload, f, indent=2)

    def _classify_state(self, cpu, mem, l1):
        """Map vitals to a state label; any value may be the "?" placeholder."""
        try:
            cpu = float(cpu)
            mem = float(mem)
            l1 = float(l1)
        except Exception:
            return "UNKNOWN"

        # Alert/critical thresholds come from neurotron_config.
        if cpu >= HOMEOSTASIS_CPU_ALERT or mem >= HOMEOSTASIS_MEM_ALERT or l1 >= HOMEOSTASIS_LOAD_ALERT:
            return "CRITICAL"
        if cpu >= HOMEOSTASIS_CPU_WARN or mem >= HOMEOSTASIS_MEM_WARN or l1 >= HOMEOSTASIS_LOAD_WARN:
            return "ALERT"
        return "STABLE"

    def _delta(self, a, b):
        """Numeric difference a - b (element-wise for equal-length lists).

        Returns "?" when either side is non-numeric (e.g. first run).
        """
        try:
            if isinstance(a, list) and isinstance(b, list) and len(a) == len(b):
                return [round(float(x) - float(y), 2) for x, y in zip(a, b)]
            return round(float(a) - float(b), 2)
        except Exception:
            return "?"

    def _render_mini_trend(self, values, width=24, charset="▁▂▃▄▅▆▇█"):
        """Render the last `width` samples as a unicode sparkline.

        Non-numeric samples render as "·". FIX: lo/hi are now computed over
        the numeric subset only — the original called min()/max() on the raw
        list and raised TypeError whenever the history mixed numbers with
        "?" placeholders (and its isinstance guard ran too late to help).
        Behavior on all-numeric input is unchanged.
        """
        if not values:
            return ""
        numeric = [v for v in values if isinstance(v, (int, float))]
        if not numeric:
            # nothing numeric to scale against
            return ""
        lo = min(numeric)
        hi = max(numeric)
        span = (hi - lo) or 1.0
        levels = len(charset) - 1
        bars = []
        for v in values[-width:]:
            if not isinstance(v, (int, float)):
                bars.append("·")
                continue
            i = int(round((v - lo) / span * levels))
            bars.append(charset[i])
        return "".join(bars)

    def run_exam(self):
        """Run one full exam.

        Prints a comparison table, persists the diagnostic payload plus
        history, appends a telemetry sample (best effort), and returns
        (state, payload).
        """
        console.print("\n[bold]🤖 Iniciando rotina de Auto-Diagnóstico Evolutivo...[/bold]\n")

        snap = self.perception.snapshot()
        cpu = snap.get("cpu_percent", "?")
        mem = snap.get("mem_percent", "?")
        load = snap.get("loadavg", ["?", "?", "?"])

        prev = self._load_previous()
        self.previous = prev

        # Deltas against the previous exam ("?" placeholders propagate).
        cpu_prev = prev.get("cpu_percent") if prev else "?"
        mem_prev = prev.get("mem_percent") if prev else "?"
        load_prev = prev.get("loadavg") if prev else ["?", "?", "?"]

        d_cpu = self._delta(cpu, cpu_prev)
        d_mem = self._delta(mem, mem_prev)
        d_load = self._delta(load, load_prev)

        # State classification uses only the 1-minute load average.
        l1 = load[0] if isinstance(load, list) and load else "?"
        state = self._classify_state(cpu, mem, l1)

        # Rich comparison table: current value, delta, previous value.
        table = Table(title="🩺 Exame Clínico Evolutivo", show_lines=True)
        table.add_column("Sinal Vital")
        table.add_column("Atual", justify="right")
        table.add_column("Δ", justify="center")
        table.add_column("Anterior", justify="right")

        def fmt(v):
            # Single stringification point (lists and scalars render the same way).
            if isinstance(v, list):
                return str(v)
            return str(v)

        table.add_row("CPU (%)", fmt(cpu), fmt(d_cpu), fmt(cpu_prev))
        table.add_row("Memória (%)", fmt(mem), fmt(d_mem), fmt(mem_prev))
        table.add_row("Carga média (1/5/15)", fmt(load), "≈" if d_load == "?" else fmt(d_load), fmt(load_prev))
        console.print(table)

        payload = {
            "schema": NEUROTRON_DIAG_SCHEMA,
            "timestamp": _now_iso(),
            "cpu_percent": cpu,
            "mem_percent": mem,
            "loadavg": load,
            "state": state,
            "env": {
                "user": snap.get("env_user"),
                "term": snap.get("env_term"),
            },
        }
        self._save_current(payload)
        console.print(f"[green]✔ Histórico evolutivo atualizado em:[/green] \n{self.last_file}")

        # Continuous telemetry update — best effort, never fatal to the exam.
        try:
            telemetry_file = Path(NEUROTRON_DATASET_PATH) / "telemetry.json"
            telemetry_file.parent.mkdir(parents=True, exist_ok=True)

            telemetry = []
            if telemetry_file.exists():
                telemetry = json.loads(telemetry_file.read_text() or "[]")

            telemetry.append({
                "timestamp": payload["timestamp"],
                "cpu": payload.get("cpu_percent"),
                "mem": payload.get("mem_percent"),
                "load": payload.get("loadavg"),
                "state": payload.get("state"),
            })

            telemetry = telemetry[-128:]  # keep only the last 128 samples
            telemetry_file.write_text(json.dumps(telemetry, indent=2))
        except Exception as e:
            console.print(f"[yellow]⚠️ Falha ao atualizar telemetria:[/] {e}")

        return state, payload
+ """ + def __init__(self, runtime_dir, log_dir, tick_seconds=NEUROTRON_TICK): + self.runtime_dir = runtime_dir + self.log_dir = log_dir + self.tick = float(tick_seconds) + self.mode = NEUROTRON_MODE + self._tick_count = 0 + self.diagnostic = AutoDiagnostic(runtime_dir, log_dir) + + self.console = Console() + self.memory = Hippocampus(log_dir=log_dir) + self.perception = Perception() + self.motor = Motor() + + # Message bus simples: channels → deque + self.bus = defaultdict(lambda: deque(maxlen=32)) + + # Telemetria em memória (curto prazo) + self.telemetry = deque(maxlen=TELEMETRY_MAXLEN) + + # Regista neurónios (podes adicionar mais à medida) + self.neurons: list[Neuron] = [ + VitalSigns(self), + EchoAgent(self), + ] + + self._booted = False + + # Caminho para gravar a telemetria + self.telemetry_path = Path(NEUROTRON_DATASET_PATH) / "telemetry.json" + self.telemetry_path.parent.mkdir(parents=True, exist_ok=True) + + # ——— ciclo de vida ——— + def boot(self) -> None: + if self._booted: + return + self.console.print("[bold cyan]🧠 Neurotron[/] — boot") + self.memory.remember("boot", {"version": "0.1", "tick": self.tick}) + self._booted = True + state, _ = self.diagnostic.run_exam() + self._apply_homeostasis(state) + + def _apply_homeostasis(self, state): + if state == "CRITICAL": + self.mode = "diagnostic" + self.tick = min(NEUROTRON_TICK_MAX, self.tick + NEUROTRON_TICK_STEP) + elif state == "ALERT": + self.tick = min(NEUROTRON_TICK_MAX, self.tick + NEUROTRON_TICK_STEP / 2) + elif state == "STABLE": + self.tick = max(NEUROTRON_TICK_MIN, self.tick - NEUROTRON_TICK_STEP / 2) + # UNKNOWN → não mexe + + def shutdown(self, reason: str = ""): + self.console.print(f"[yellow]shutdown:[/] {reason}") + self.memory.remember("shutdown", {"reason": reason}) + + def fatal(self, e: Exception): + self.console.print(f"[red]fatal:[/] {e!r}") + self.memory.remember("fatal", {"error": repr(e)}) + print(f"fatal: {repr(e)}") + raise + + # ——— loop ——— + def observe(self) -> None: + for n 
in self.neurons: + n.observe() + + def think(self) -> None: + for n in self.neurons: + n.think() + + def act(self) -> None: + # Consumir ações agregadas e executar + action = self.bus_consume("actions") + if action and action.get("action") == "echo": + res = self.motor.run("echo", [action.get("text", "")]) + self.memory.remember("act.echo", res) + if res.get("stdout"): + self.console.print(f"[green]{res['stdout'].strip()}[/]") + + def rest(self): + # Heartbeat e microalertas antes de dormir + if HEARTBEAT_ENABLED: + self._heartbeat_and_telemetry() + + # Pausa regulada + sleep(self.tick) + + # Contador e rotinas periódicas + self._tick_count += 1 + + if self._tick_count % NEUROTRON_DIAG_EVERY_TICKS == 0: + state, _ = self.diagnostic.run_exam() + self._apply_homeostasis(state) + + if self._tick_count % TELEMETRY_FLUSH_EVERY_TICKS == 0: + self._flush_telemetry() + + # ——— telemetria/alertas ——— + def _heartbeat_and_telemetry(self): + snap = self.perception.snapshot() + cpu = snap.get("cpu_percent", "?") + mem = (snap.get("mem") or {}).get("percent", "?") + load = snap.get("loadavg") or [] + + # Adiciona ao buffer de telemetria + self.telemetry.append({ + "ts": time.time(), + "cpu": cpu, + "mem": mem, + "load": load, + "tick": self.tick, + }) + + # Microalertas com base nos limiares + self._evaluate_microalerts(cpu, mem, load) + + # Heartbeat visual + color = self._color_for_levels(cpu, mem, load) + if HEARTBEAT_STYLE == "compact": + self.console.print(f"[bold {color}]💓[/] CPU: {cpu}% | MEM: {mem}% | TICK: {self.tick:.2f}s") + else: + self.console.print( + f"[bold {color}]💓 [Heartbeat][/bold {color}] " + f"CPU: {cpu}% | MEM: {mem}% | LOAD: {load} | TICK: {self.tick:.2f}s | MODE: {self.mode}" + ) + + def _evaluate_microalerts(self, cpu, mem, load): + alerts = [] + # Normaliza + load1 = load[0] if (isinstance(load, (list, tuple)) and load) else None + + try: + if isinstance(cpu, (int, float)) and cpu >= NEUROTRON_THRESHOLDS["cpu_high"]: + alerts.append(("cpu", cpu)) + if 
isinstance(mem, (int, float)) and mem >= NEUROTRON_THRESHOLDS["mem_high"]: + alerts.append(("mem", mem)) + if isinstance(load1, (int, float)) and load1 >= NEUROTRON_THRESHOLDS["load1_high"]: + alerts.append(("load1", load1)) + except KeyError: + pass # thresholds incompletos → sem microalertas + + if not alerts: + return + + for (metric, value) in alerts: + self.console.print(f"[yellow]⚠️ Microalerta:[/] {metric.upper()} {value} — ajustando homeostase (tick +{NEUROTRON_TICK_STEP:.2f}s)") + # Ajuste simples de segurança + self.tick = min(NEUROTRON_TICK_MAX, self.tick + NEUROTRON_TICK_STEP) + + self.memory.remember("microalert", { + "ts": time.time(), + "alerts": alerts, + "new_tick": self.tick, + }) + + def _color_for_levels(self, cpu, mem, load): + # Heurística simples de cor + try: + load1 = load[0] if (isinstance(load, (list, tuple)) and load) else 0.0 + high = ( + (isinstance(cpu, (int, float)) and cpu >= NEUROTRON_THRESHOLDS["cpu_high"]) or + (isinstance(mem, (int, float)) and mem >= NEUROTRON_THRESHOLDS["mem_high"]) or + (isinstance(load1, (int, float)) and load1 >= NEUROTRON_THRESHOLDS["load1_high"]) + ) + if high: + return "yellow" + except Exception: + pass + return "green" + + def _flush_telemetry(self): + # Grava o buffer de telemetria em JSON (mantendo histórico curto) + try: + data = list(self.telemetry) + with self.telemetry_path.open("w") as f: + json.dump(data, f) + self.memory.remember("telemetry.flush", {"count": len(data), "path": str(self.telemetry_path)}) + except Exception as e: + self.console.print(f"[red]✖ Falha ao gravar telemetria:[/] {e!r}") + self.memory.remember("telemetry.error", {"error": repr(e)}) + + # ——— bus ——— + def bus_publish(self, channel: str, payload: dict) -> None: + self.bus[channel].append(payload) + + def bus_consume(self, channel: str) -> dict | None: + q = self.bus[channel] + return q.popleft() if q else None \ No newline at end of file diff --git a/src/_nfdos/rootfs/opt/kernel/neurotron/neurotron_core/disk_init.py 
#!/usr/bin/env python3
"""
💾 Disk Initialisation Module — Neurotron V0.1 (updated)
Detects, evaluates, prepares and mounts the NFDOS persistent disk.
- Never formats a disk that already contains a known filesystem, unless forced.
- Forcing a format:
  * EXPORT: export NFDOS_FORCE_FORMAT=1 (in the initramfs environment, if applicable)
  * Kernel cmdline: add `nfdos_force_format=1` to QEMU's -append
"""

import os
import subprocess
from pathlib import Path
from rich.console import Console

# Allow running this file directly (python disk_init.py) by faking the package
# so the relative import below still resolves.
if __name__ == "__main__" and __package__ is None:
    import sys
    from pathlib import Path
    sys.path.append(str(Path(__file__).resolve().parents[1]))
    __package__ = "neurotron_core"

from .neurotron_config import (
    MOUNT_POINT, DISK_CANDIDATES
)

console = Console()

def run(cmd: list[str]) -> bool:
    """Run a command silently; return True on success, False otherwise."""
    try:
        subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False


def detect_disk() -> str | None:
    """Return the first existing device from DISK_CANDIDATES, or None."""
    for dev in DISK_CANDIDATES:
        p = Path(dev)
        if p.exists():
            console.print(f"[cyan]🔍 Detetado disco:[/] {dev}")
            return dev
    console.print("[yellow]⚠️ Nenhum disco detectado.[/yellow]")
    return None


def blkid_check(device: str) -> str | None:
    """Probe the device with blkid (if installed); return its output or None."""
    try:
        out = subprocess.run(["blkid", device], stdout=subprocess.PIPE, text=True, check=False)
        return out.stdout.strip() if out.stdout else None
    except FileNotFoundError:
        return None


def read_sig(device: str, size: int = 2048) -> bytes | None:
    """Read the first `size` bytes of the device, or None on any error."""
    try:
        with open(device, "rb") as f:
            return f.read(size)
    except Exception:
        return None


def detect_fs_by_magic(device: str) -> str | None:
    """
    Detect simple on-disk signatures:
    - MBR partition-table signature 0x55AA at offsets 510-511
    - ext superblock magic (0xEF53, little-endian) at offset 1024 + 56 = 1080
    - NTFS OEM ID at offset 3
    - FAT12/16/32 markers anywhere in the boot sector
    Returns a descriptive string or None when nothing is recognised.
    """
    buf = read_sig(device, size=4096)
    if not buf:
        return None

    # MBR signature → an existing partition table takes precedence.
    if len(buf) >= 512 and buf[510:512] == b'\x55\xAA':
        return "mbr-partition-table"

    # ext2/3/4 superblock magic at 1024+56 = 1080 (bytes 0x53 0xEF, LE).
    if len(buf) >= 1082 and buf[1080:1082] == b'\x53\xEF':
        return "ext (superblock)"

    # NTFS OEM ID: boot-sector bytes 3..11 hold "NTFS" plus four padding
    # spaces. FIX: the original compared the 8-byte slice buf[3:11] against
    # a shorter literal, so the test could never match; comparing only the
    # unambiguous 4-byte "NTFS" prefix is padding-independent and correct.
    if len(buf) >= 7 and buf[3:7] == b'NTFS':
        return "ntfs"

    # FAT markers (loose match anywhere in the first 4 KiB of boot data).
    if b"FAT32" in buf or b"FAT16" in buf or b"FAT12" in buf:
        return "fat"

    return None


def parse_cmdline_flag() -> bool:
    """Return True when /proc/cmdline contains nfdos_force_format=1."""
    try:
        with open("/proc/cmdline", "r") as f:
            cmd = f.read()
        return "nfdos_force_format=1" in cmd.split()
    except Exception:
        return False

def which(prog: str) -> str | None:
    """Minimal PATH search (BusyBox may lack shutil.which-friendly env)."""
    for p in os.environ.get("PATH", "/sbin:/bin:/usr/sbin:/usr/bin").split(":"):
        cand = Path(p) / prog
        if cand.exists() and os.access(cand, os.X_OK):
            return str(cand)
    return None

def format_ext4(device: str, label: str = "NFDOS_DATA") -> bool:
    """Format the device as ext4, trying mkfs.ext4 → mke2fs → mkfs in turn,
    and surfacing detailed error logs (BusyBox-safe)."""
    mke2fs = which("mke2fs")
    mkfs_ext4 = which("mkfs.ext4")
    mkfs = which("mkfs")

    candidates = []

    if mkfs_ext4:
        candidates.append(([mkfs_ext4, "-F", "-L", label, device], "mkfs.ext4"))
    if mke2fs:
        # BusyBox mke2fs does not accept '-t'; adjusted inside the loop.
        candidates.append(([mke2fs, "-F", "-t", "ext4", "-L", label, device], "mke2fs"))
    if mkfs:
        candidates.append(([mkfs, "-t", "ext4", "-F", "-L", label, device], "mkfs"))

    if not candidates:
        console.print("[red]❌ Nenhum utilitário mkfs disponível no initramfs![/red]")
        return False

    for cmd, name in candidates:
        console.print(f"[yellow]⚙️ Formatando {device} com {name}...[/yellow]")

        # 👉 For BusyBox mke2fs, strip the unsupported -t ext4 arguments.
        # NOTE(review): this filter removes any argument equal to "ext4",
        # which would also eat a label literally named "ext4" — harmless
        # with the current default label, but worth tightening.
        if name == "mke2fs":
            cmd = [c for c in cmd if c != "-t" and c != "ext4"]

        try:
            result = subprocess.run(
                cmd,
                text=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                check=True,
            )
            if result.stdout:
                console.print(result.stdout.strip())
            console.print(f"[green]✔ Formatação concluída com {name}.[/green]")
            return True

        except subprocess.CalledProcessError as e:
            console.print(f"[red]❌ {name} falhou (código {e.returncode}).[/red]")
            if e.stdout:
                console.print(f"[cyan]📜 STDOUT:[/cyan]\n{e.stdout.strip()}")
            if e.stderr:
                console.print(f"[magenta]⚠️ STDERR:[/magenta]\n{e.stderr.strip()}")

    console.print("[red]❌ Nenhum método de formatação teve sucesso.[/red]")
    console.print("[cyan]🧠 Sugestão:[/] verifique se o kernel suporta EXT4 e se o BusyBox inclui mke2fs.")
    return False


def ensure_fs(device: str) -> bool:
    """
    Verify a filesystem exists on `device`.
    If none exists AND formatting was explicitly authorised (env var or
    kernel cmdline flag), format it as ext4. Never formats a disk that
    shows any known signature.
    """
    # 1️⃣ fast path via blkid
    info = blkid_check(device)
    if info:
        console.print(f"[green]🧠 Disco já formatado (blkid):[/] {info}")
        return True

    # 2️⃣ fallback: raw signature sniffing
    sig = detect_fs_by_magic(device)
    if sig:
        console.print(f"[yellow]⚠ Assinatura detectada no disco:[/] {sig}")
        console.print("[red]❗ O disco contém dados ou partições existentes. Abortando formatação.[/red]")
        return False

    # 3️⃣ nothing detected — disk looks virgin; require explicit consent
    forced_env = os.environ.get("NFDOS_FORCE_FORMAT") == "1"
    forced_cmd = parse_cmdline_flag()

    if not (forced_env or forced_cmd):
        console.print("[yellow]⚠ Disco parece virgem, mas não há confirmação para formatar.[/yellow]")
        console.print("Use `nfdos_force_format=1` no kernel cmdline ou export NFDOS_FORCE_FORMAT=1")
        console.print("para permitir formatação automática.")
        return False

    # 4️⃣ attempt the format
    console.print(f"[yellow]⚙️ Forçando formatação de {device} como ext4 (FLAG DETETADA)...[/yellow]")

    ok = format_ext4(device)
    if ok:
        console.print("[green]✔ Formatação concluída com sucesso.[/green]")
        return True

    # 5️⃣ everything failed
    console.print("[red]❌ Falha na formatação.[/red]")
    console.print("[cyan]🧠 Sugestão:[/] verifique se o kernel inclui suporte para EXT4 ou se o mkfs/mke2fs está embutido no BusyBox.")
    return False


def mount_disk(device: str) -> bool:
    """Mount the device on MOUNT_POINT; return True on success."""
    os.makedirs(MOUNT_POINT, exist_ok=True)
    return run(["mount", device, MOUNT_POINT])


def debug_env():
    """Dump /dev, recent kernel messages and partition info for debugging."""
    console.print("[yellow]🩻 DEBUG: listando /dev/* e últimas mensagens do kernel[/yellow]")
    devs = sorted(Path("/dev").glob("*"))
    console.print("📂 Dispositivos disponíveis:", ", ".join([d.name for d in devs if d.is_char_device() or d.is_block_device()]))
    os.system("dmesg | tail -n 20 || echo '(dmesg não disponível)'")
    console.print("[yellow]───────────────────────────────[/yellow]")
    os.system("echo '--- /proc/partitions ---'; cat /proc/partitions || true")
    os.system("echo '--- dmesg | grep -i virtio ---'; dmesg | grep -i virtio || true")
    console.print("[yellow]───────────────────────────────[/yellow]")


def initialize_persistence():
    """Full physical-hippocampus bring-up: detect → ensure fs → mount → seed."""
    device = detect_disk()
    if not device:
        debug_env()
        console.print("[red]❌ Nenhum disco físico encontrado — usando modo RAM.[/red]")
        return False

    if not ensure_fs(device):
        console.print("[red]❌ Preparação do sistema de ficheiros foi interrompida.[/red]")
        return False

    if not mount_disk(device):
        console.print("[red]❌ Falha ao montar disco.[/red]")
        return False

    console.print(f"[green]✔ Disco montado em:[/] {MOUNT_POINT}")

    # NOTE(review): the telemetry seed lives under /opt (the read-only
    # image), not under MOUNT_POINT — confirm this is intentional.
    telemetry_file = Path("/opt/kernel/neurotron/data/telemetry.json")
    telemetry_file.parent.mkdir(parents=True, exist_ok=True)
    if not telemetry_file.exists():
        telemetry_file.write_text("[]")

    # Seed the persistent layout and identity marker.
    for d in ["data", "logs", "dna"]:
        Path(MOUNT_POINT, d).mkdir(parents=True, exist_ok=True)
    Path(MOUNT_POINT, "DNA_ID").write_text("NEUROTRON_HIPOCAMPUS_V1\n")

    console.print("[cyan]👉 Hipocampo físico inicializado com sucesso.[/cyan]")
    return True


if __name__ == "__main__":
    initialize_persistence()
+ """ + def __init__(self, log_dir: Path): + self.log_dir = log_dir + self.events_file = log_dir / "events.jsonl" + + def remember(self, kind: str, data: dict) -> None: + rec = { + "ts": datetime.utcnow().isoformat() + "Z", + "kind": kind, + "data": data, + } + try: + if "orjson" in json.__name__: + blob = json.dumps(rec) + else: + blob = json.dumps(rec) # type: ignore + with self.events_file.open("ab") as f: + f.write(blob if isinstance(blob, bytes) else blob.encode("utf-8")) + f.write(b"\n") + except Exception: + # evitar crash por IO em early boot + pass diff --git a/src/_nfdos/rootfs/opt/kernel/neurotron/neurotron_core/motor.py b/src/_nfdos/rootfs/opt/kernel/neurotron/neurotron_core/motor.py new file mode 100644 index 0000000..b6a466a --- /dev/null +++ b/src/_nfdos/rootfs/opt/kernel/neurotron/neurotron_core/motor.py @@ -0,0 +1,27 @@ +import subprocess + +class Motor: + """ + Ator do sistema: executa comandos controlados (whitelist). + Mantém-se minimal até termos política de segurança mais rica. + """ + SAFE_CMDS = { + "echo": ["echo"], + "sh": ["/bin/sh"], # shell interativo (init) + } + + def run(self, cmd: str, args: list[str] | None = None) -> dict: + prog = self.SAFE_CMDS.get(cmd) + if not prog: + return {"ok": False, "error": f"cmd '{cmd}' não permitido"} + try: + full = prog + (args or []) + res = subprocess.run(full, capture_output=True, text=True) + return { + "ok": res.returncode == 0, + "code": res.returncode, + "stdout": res.stdout, + "stderr": res.stderr, + } + except Exception as e: + return {"ok": False, "error": str(e)} diff --git a/src/_nfdos/rootfs/opt/kernel/neurotron/neurotron_core/neuron.py b/src/_nfdos/rootfs/opt/kernel/neurotron/neurotron_core/neuron.py new file mode 100644 index 0000000..972102c --- /dev/null +++ b/src/_nfdos/rootfs/opt/kernel/neurotron/neurotron_core/neuron.py @@ -0,0 +1,30 @@ +from typing import Any, Dict + +class Neuron: + """ + Classe-base de um “neurónio-agente”. 
"""
🧠 neurotron_config.py
NFDOS — Neurotron vital-parameter core
------------------------------------------------
Centralises every symbolic and technical tuning knob of the
Neurotron cognitive cycle.

Version: 0.1 (Birth)
"""

from pathlib import Path

# ======================================
# 🌐 Directories and paths
# ======================================

BASE_DIR = Path(__file__).resolve().parents[1]  # /opt/kernel/neurotron/
CORE_DIR = BASE_DIR / "neurotron_core"
LOG_DIR = Path("/var/log/neurotron")  # may not exist yet in the rootfs
RUNTIME_DIR = Path("/var/run/neurotron")
MOUNT_POINT = "/var/neurotron"
DISK_CANDIDATES = ["/dev/vda", "/dev/vdb", "/dev/sda", "/dev/hda"]

# ======================================
# ⚙️ Main cognitive parameters
# ======================================

# Time between cognitive cycles (seconds)
NEUROTRON_TICK = 1.0

# Log verbosity (0 = silent, 1 = normal, 2 = debug)
NEUROTRON_VERBOSITY = 1

# Operating mode
# - diagnostic: runs integrity checks
# - learning: enables adaptive cycles (tree, etc.)
# - simulation: continuous observation behaviour
NEUROTRON_MODE = "diagnostic"

# Homeostasis limit (self-regulation)
# If CPU or memory exceeds this value (%), the system slows its pace
NEUROTRON_HOMEOSTASIS = 85.0

HOMEOSTASIS_CPU_WARN = 70.0   # %
HOMEOSTASIS_CPU_ALERT = 85.0  # %
HOMEOSTASIS_MEM_WARN = 75.0   # %
HOMEOSTASIS_MEM_ALERT = 90.0  # %
HOMEOSTASIS_LOAD_WARN = 1.5   # 1-min average (tune to your single/SMT core)
HOMEOSTASIS_LOAD_ALERT = 3.0

NEUROTRON_DIAG_EVERY_TICKS = 5  # re-examine vital signs every N cognitive cycles
NEUROTRON_TICK_MIN = 0.5
NEUROTRON_TICK_MAX = 3.0
NEUROTRON_TICK_STEP = 0.25

# Entropy (seed) for pseudo-random behaviours
NEUROTRON_SEED = 42

# Maximum Hippocampus memory size (KB)
NEUROTRON_MEMORY_SIZE = 256  # threshold at which the system starts to "forget"

# ======================================
# 🧩 Subsystem parameters
# ======================================

# Cortex — decision core
CORTEX_MAX_THREADS = 1   # simultaneous reasoning threads
CORTEX_LOOP_DELAY = 0.1  # delay between internal cycles

# Hippocampus — memory
HIPPOCAMPUS_LOG_RETENTION = 100  # maximum number of stored logs
HIPPOCAMPUS_AUTOSAVE = True      # enable auto-save between cycles

# Motor — output / action
MOTOR_OUTPUT_DEVICE = "console"  # target: console, log, cloud (future)
MOTOR_SHOW_SYMBOLS = True        # show symbols (🧠, ⚙️, etc.)

# Perception — sensors
PERCEPTION_CPU_SOURCE = "/proc/stat"
PERCEPTION_MEM_SOURCE = "/proc/meminfo"
PERCEPTION_UPDATE_INTERVAL = 2.0  # seconds between measurements

# ======================================
# 🧠 Future parameters (placeholders)
# ======================================

# Expansion mode (for future versions)
# "none", "networked", "distributed"
NEUROTRON_EXPANSION_MODE = "none"

# Local dataset path (for offline learning)
NEUROTRON_DATASET_PATH = BASE_DIR / "data"
NEUROTRON_HISTORY_KEEP = 8  # keep the last N entries in the file

# Schema identifier for upgrades
NEUROTRON_DIAG_SCHEMA = "v4"

# --- Telemetry and Heartbeat (V5) ---
HEARTBEAT_ENABLED = True     # show a heartbeat every tick
HEARTBEAT_STYLE = "compact"  # "compact" or "verbose"

# Micro-alert thresholds (homeostasis)
NEUROTRON_THRESHOLDS = {
    "cpu_high": 85.0,    # %
    "mem_high": 90.0,    # %
    "load1_high": 2.0,   # load avg (1 min)
}

# In-memory telemetry buffer and periodic flush
TELEMETRY_MAXLEN = 64
TELEMETRY_FLUSH_EVERY_TICKS = 5  # every so often, persist to JSON

# ======================================
# 🧭 Utility: parameter dump
# ======================================

def show_config():
    """Print the current Neurotron configuration as JSON.

    NOTE(review): only names starting with "NEUROTRON_" are listed —
    HOMEOSTASIS_*, HEARTBEAT_*, TELEMETRY_*, CORTEX_* etc. are omitted;
    confirm whether that filter is intentional.
    """
    import json
    cfg = {k: v for k, v in globals().items() if k.startswith("NEUROTRON_")}
    print(json.dumps(cfg, indent=2, default=str))

if __name__ == "__main__":
    show_config()
import os
from time import sleep


class Perception:
    """
    Internal sensors read straight from /proc:
      - CPU %   : computed from a delta between two /proc/stat samples
      - Memory %: from /proc/meminfo (MemTotal / MemAvailable)
      - Load avg: from os.getloadavg() or /proc/loadavg
    No external dependencies (no psutil).  Any reading that cannot be
    obtained degrades to the string "?" instead of raising.
    """

    def _read_proc_stat(self):
        """Return the aggregate "cpu" counters from /proc/stat, or None on failure."""
        try:
            with open("/proc/stat", "r") as f:
                line = f.readline()
                if not line.startswith("cpu "):
                    return None
                parts = line.strip().split()[1:]
                # user nice system idle iowait irq softirq steal guest guest_nice
                vals = list(map(int, parts[:10]))
                return {
                    "user": vals[0], "nice": vals[1], "system": vals[2], "idle": vals[3],
                    "iowait": vals[4], "irq": vals[5], "softirq": vals[6], "steal": vals[7],
                    "guest": vals[8], "guest_nice": vals[9],
                }
        except Exception:
            return None

    def _cpu_percent(self, interval=0.05):
        """CPU usage (%) over a short sampling window, or "?" when unavailable."""
        a = self._read_proc_stat()
        if not a:
            return "?"
        sleep(interval)  # small sampling window between the two snapshots
        b = self._read_proc_stat()
        if not b:
            return "?"

        # BUGFIX: per proc(5), the "guest" and "guest_nice" counters are
        # already accounted inside "user"/"nice", so summing all ten fields
        # double-counts guest time and overstates usage on virtualized
        # hosts.  Busy time is the first eight fields minus idle+iowait.
        def totals(s):
            idle = s["idle"] + s["iowait"]
            busy = (s["user"] + s["nice"] + s["system"]
                    + s["irq"] + s["softirq"] + s["steal"])
            return idle, idle + busy

        idle_a, total_a = totals(a)
        idle_b, total_b = totals(b)
        totald = total_b - total_a
        idled = idle_b - idle_a
        if totald <= 0:
            return "?"  # counters did not advance (or wrapped)
        usage = (totald - idled) * 100.0 / totald
        return round(usage, 1)

    def _mem_percent(self):
        """Used-memory percentage from /proc/meminfo, or "?" when unavailable."""
        try:
            info = {}
            with open("/proc/meminfo", "r") as f:
                for line in f:
                    k, v = line.split(":", 1)
                    info[k.strip()] = v.strip()

            def kB(key):
                # values look like "16384 kB" — keep just the number
                if key not in info:
                    return None
                return float(info[key].split()[0])

            mem_total = kB("MemTotal")
            mem_avail = kB("MemAvailable")
            if not mem_total or mem_avail is None:
                return "?"
            used = mem_total - mem_avail
            return round(used * 100.0 / mem_total, 1)
        except Exception:
            return "?"

    def _loadavg(self):
        """1/5/15-minute load averages, or ["?", "?", "?"] when unavailable."""
        try:
            if hasattr(os, "getloadavg"):
                l1, l5, l15 = os.getloadavg()
                return [round(l1, 2), round(l5, 2), round(l15, 2)]
            # fallback for platforms/builds without os.getloadavg
            with open("/proc/loadavg", "r") as f:
                parts = f.read().strip().split()
            l1, l5, l15 = map(float, parts[:3])
            return [round(l1, 2), round(l5, 2), round(l15, 2)]
        except Exception:
            return ["?", "?", "?"]

    def snapshot(self) -> dict:
        """One combined reading of every sensor, suitable for telemetry."""
        return {
            "env_user": os.environ.get("USER") or "root",
            "env_term": os.environ.get("TERM") or "unknown",
            "cpu_percent": self._cpu_percent(),
            "mem_percent": self._mem_percent(),
            "loadavg": self._loadavg(),
        }
# Telemetry source and display tuning.
DATASET = "/opt/kernel/neurotron/data/telemetry.json"
BAR_CHARS = "▁▂▃▄▅▆▇█"
SAMPLES = 24   # how many recent samples to show
REFRESH = 2.0  # seconds between refreshes


def mini_graph(values, width=24):
    """Render *values* as a sparkline of block characters (oldest → newest).

    Non-numeric entries are ignored; if no numeric data remains, a row of
    middle dots is returned instead.
    """
    numeric = [v for v in values if isinstance(v, (int, float))]
    if not numeric:
        return "·" * width
    lo = min(numeric)
    span = (max(numeric) - lo) or 1.0
    top = len(BAR_CHARS) - 1
    return "".join(
        BAR_CHARS[int(round((v - lo) / span * top))] for v in numeric[-width:]
    )


def read_telemetry(path: str):
    """Load the newest SAMPLES entries from the telemetry JSON file.

    Any read/parse problem yields an empty list rather than an exception.
    """
    try:
        raw = Path(path).read_text() or "[]"
        return json.loads(raw)[-SAMPLES:]
    except Exception:
        return []


def render_panel(console, data):
    """Draw the vital-signs table, or a notice when *data* is empty."""
    if not data:
        console.print("[yellow]Nenhum dado de telemetria disponível.[/yellow]")
        return

    def numeric_series(key):
        # keep only samples whose value for *key* is a plain number
        return [d.get(key) for d in data if isinstance(d.get(key), (int, float))]

    cpu = numeric_series("cpu")
    mem = numeric_series("mem")
    load = [
        d.get("load")[0]
        for d in data
        if isinstance(d.get("load"), (list, tuple))
        and isinstance(d.get("load")[0], (int, float))
    ]

    table = Table(show_header=True, header_style="bold cyan")
    table.add_column("Sinal Vital", justify="left")
    table.add_column("Tendência", justify="left")
    table.add_column("Média", justify="right")

    table.add_row("CPU (%)", mini_graph(cpu), f"{mean(cpu):.1f}%" if cpu else "?")
    table.add_row("Memória (%)", mini_graph(mem), f"{mean(mem):.1f}%" if mem else "?")
    table.add_row("Carga (1min)", mini_graph(load), f"{mean(load):.2f}" if load else "?")

    console.clear()
    console.print(Panel(table, title="🩺 TELEMETRIA RECENTE", border_style="green"))


def main():
    """Poll the telemetry file forever, redrawing the panel each cycle."""
    console = Console()
    console.print("[bold cyan]Neurotron Telemetry Tail — Iniciar Monitorização[/bold cyan]\n")

    while True:
        if not Path(DATASET).exists():
            console.print(f"[yellow]A aguardar dados em {DATASET}...[/yellow]")
            time.sleep(REFRESH)
            continue

        data = read_telemetry(DATASET)
        if data:
            render_panel(console, data)
        else:
            console.print("[yellow]Nenhum dado de telemetria disponível.[/yellow]")
        time.sleep(REFRESH)


if __name__ == "__main__":
    main()
de entrada do “cérebro” do NFDOS. +Boot flow: init (BusyBox) → Python → Neurotron (este ficheiro). +""" + +import os +import sys +import time +import json +from datetime import datetime +from pathlib import Path +from rich.console import Console +from rich.panel import Panel +from rich.table import Table +from rich.pretty import pprint + +from neurotron_core import neurotron_config +from neurotron_core.autodiagnostic import AutoDiagnostic +from neurotron_core.perception import Perception +from neurotron_core.neurotron_config import ( + NEUROTRON_TICK, NEUROTRON_MODE, NEUROTRON_HOMEOSTASIS, + CORTEX_LOOP_DELAY, MOTOR_OUTPUT_DEVICE, BASE_DIR, CORE_DIR +) + +console = Console() + +# Caminho base do Neurotron +if not CORE_DIR.exists(): + console.print(f"[red]Erro:[/] diretório esperado não encontrado: {CORE_DIR}") + sys.exit(1) +if str(CORE_DIR) not in sys.path: + sys.path.insert(0, str(CORE_DIR)) + +console.print("[cyan]🧩 Mapa Neural de Importação:[/cyan]") +for p in sys.path: + console.print(f" - {p}") + +from neurotron_core.cortex import Cortex # noqa: E402 + + +def detect_persistent_mount() -> bool: + """Verifica se o hipocampo físico está montado em /var/neurotron""" + mount_point = Path("/var/neurotron") + try: + if mount_point.exists() and os.path.ismount(mount_point): + console.print(f"[green]💾 Hipocampo físico montado:[/] {mount_point}") + return True + else: + # fallback: check by /proc/mounts in early boot + with open("/proc/mounts") as f: + for line in f: + if " /var/neurotron " in line: + console.print(f"[green]💾 Hipocampo físico montado (via /proc):[/] {mount_point}") + return True + except Exception as e: + console.print(f"[yellow]⚠ Falha ao verificar montagem persistente:[/] {e}") + return False + + +def main(): + persistent_mode = detect_persistent_mount() + if persistent_mode: + os.environ["NEUROTRON_MODE"] = "persistent" + os.environ["NEUROTRON_RUNTIME"] = "/var/neurotron/data" + os.environ["NEUROTRON_LOG"] = "/var/neurotron/logs" + else: + 
os.environ["NEUROTRON_MODE"] = "volatile" + os.environ["NEUROTRON_RUNTIME"] = "/tmp/neurotron_data" + os.environ["NEUROTRON_LOG"] = "/tmp/neurotron_logs" + + runtime_dir = Path(os.environ["NEUROTRON_RUNTIME"]) + log_dir = Path(os.environ["NEUROTRON_LOG"]) + runtime_dir.mkdir(parents=True, exist_ok=True) + log_dir.mkdir(parents=True, exist_ok=True) + + mode = os.environ["NEUROTRON_MODE"] + console.print(f"[cyan]🌍 Modo atual do Neurotron:[/] [bold]{mode.upper()}[/]") + + # inicializa o Córtex + cortex = Cortex(runtime_dir=runtime_dir, log_dir=log_dir, tick_seconds=NEUROTRON_TICK) + + try: + cortex.boot() + state = cortex.diagnostic._load_previous().get("state", "?") + console.print(f"[cyan]🩺 Estado inicial:[/] {state}\n") + console.print("[green]👉 Neurotron inicializado com sucesso.[/]\n") + console.print("[green]✔ [Mensagem Simbolica] Boot OK[/]\n") + + console.print("[green]✔ Iniciando ciclo cognitivo.[/]\n") + while True: + cortex.observe() + cortex.think() + cortex.act() + cortex.rest() + + except KeyboardInterrupt: + console.print("[yellow]⚠ Interrompido pelo utilizador (SIGINT)[/]") + cortex.shutdown(reason="SIGINT") + + except SystemExit: + cortex.shutdown(reason="SystemExit") + + except Exception as e: + console.print(f"[red]💥 Exceção não tratada:[/] {e}") + cortex.fatal(e) + + +if __name__ == "__main__": + main() + diff --git a/src/_nfdos/rootfs/sbin/acpid b/src/_nfdos/rootfs/sbin/acpid new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/acpid @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/adjtimex b/src/_nfdos/rootfs/sbin/adjtimex new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/adjtimex @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/arp b/src/_nfdos/rootfs/sbin/arp new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/arp @@ -0,0 +1 @@ +../bin/busybox \ No 
newline at end of file diff --git a/src/_nfdos/rootfs/sbin/blkid b/src/_nfdos/rootfs/sbin/blkid new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/blkid @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/blockdev b/src/_nfdos/rootfs/sbin/blockdev new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/blockdev @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/bootchartd b/src/_nfdos/rootfs/sbin/bootchartd new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/bootchartd @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/depmod b/src/_nfdos/rootfs/sbin/depmod new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/depmod @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/devmem b/src/_nfdos/rootfs/sbin/devmem new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/devmem @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/fbsplash b/src/_nfdos/rootfs/sbin/fbsplash new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/fbsplash @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/fdisk b/src/_nfdos/rootfs/sbin/fdisk new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/fdisk @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/findfs b/src/_nfdos/rootfs/sbin/findfs new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/findfs @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/freeramdisk b/src/_nfdos/rootfs/sbin/freeramdisk new file mode 120000 index 0000000..7125971 --- /dev/null 
+++ b/src/_nfdos/rootfs/sbin/freeramdisk @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/fsck b/src/_nfdos/rootfs/sbin/fsck new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/fsck @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/fsck.minix b/src/_nfdos/rootfs/sbin/fsck.minix new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/fsck.minix @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/fstrim b/src/_nfdos/rootfs/sbin/fstrim new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/fstrim @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/getty b/src/_nfdos/rootfs/sbin/getty new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/getty @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/halt b/src/_nfdos/rootfs/sbin/halt new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/halt @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/hdparm b/src/_nfdos/rootfs/sbin/hdparm new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/hdparm @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/hwclock b/src/_nfdos/rootfs/sbin/hwclock new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/hwclock @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/ifconfig b/src/_nfdos/rootfs/sbin/ifconfig new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/ifconfig @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/ifdown b/src/_nfdos/rootfs/sbin/ifdown new 
file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/ifdown @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/ifenslave b/src/_nfdos/rootfs/sbin/ifenslave new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/ifenslave @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/ifup b/src/_nfdos/rootfs/sbin/ifup new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/ifup @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/init b/src/_nfdos/rootfs/sbin/init new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/init @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/insmod b/src/_nfdos/rootfs/sbin/insmod new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/insmod @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/ip b/src/_nfdos/rootfs/sbin/ip new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/ip @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/ipaddr b/src/_nfdos/rootfs/sbin/ipaddr new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/ipaddr @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/iplink b/src/_nfdos/rootfs/sbin/iplink new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/iplink @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/ipneigh b/src/_nfdos/rootfs/sbin/ipneigh new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/ipneigh @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/iproute 
b/src/_nfdos/rootfs/sbin/iproute new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/iproute @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/iprule b/src/_nfdos/rootfs/sbin/iprule new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/iprule @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/iptunnel b/src/_nfdos/rootfs/sbin/iptunnel new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/iptunnel @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/klogd b/src/_nfdos/rootfs/sbin/klogd new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/klogd @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/loadkmap b/src/_nfdos/rootfs/sbin/loadkmap new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/loadkmap @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/logread b/src/_nfdos/rootfs/sbin/logread new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/logread @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/losetup b/src/_nfdos/rootfs/sbin/losetup new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/losetup @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/lsmod b/src/_nfdos/rootfs/sbin/lsmod new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/lsmod @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/makedevs b/src/_nfdos/rootfs/sbin/makedevs new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/makedevs @@ -0,0 +1 @@ +../bin/busybox \ No 
newline at end of file diff --git a/src/_nfdos/rootfs/sbin/mdev b/src/_nfdos/rootfs/sbin/mdev new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/mdev @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/mkdosfs b/src/_nfdos/rootfs/sbin/mkdosfs new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/mkdosfs @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/mke2fs b/src/_nfdos/rootfs/sbin/mke2fs new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/mke2fs @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/mkfs.ext2 b/src/_nfdos/rootfs/sbin/mkfs.ext2 new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/mkfs.ext2 @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/mkfs.minix b/src/_nfdos/rootfs/sbin/mkfs.minix new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/mkfs.minix @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/mkfs.vfat b/src/_nfdos/rootfs/sbin/mkfs.vfat new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/mkfs.vfat @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/mkswap b/src/_nfdos/rootfs/sbin/mkswap new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/mkswap @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/modinfo b/src/_nfdos/rootfs/sbin/modinfo new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/modinfo @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/modprobe b/src/_nfdos/rootfs/sbin/modprobe new file mode 120000 index 0000000..7125971 --- 
/dev/null +++ b/src/_nfdos/rootfs/sbin/modprobe @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/nameif b/src/_nfdos/rootfs/sbin/nameif new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/nameif @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/pivot_root b/src/_nfdos/rootfs/sbin/pivot_root new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/pivot_root @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/poweroff b/src/_nfdos/rootfs/sbin/poweroff new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/poweroff @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/raidautorun b/src/_nfdos/rootfs/sbin/raidautorun new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/raidautorun @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/reboot b/src/_nfdos/rootfs/sbin/reboot new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/reboot @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/rmmod b/src/_nfdos/rootfs/sbin/rmmod new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/rmmod @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/route b/src/_nfdos/rootfs/sbin/route new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/route @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/run-init b/src/_nfdos/rootfs/sbin/run-init new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/run-init @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/runlevel 
b/src/_nfdos/rootfs/sbin/runlevel new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/runlevel @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/setconsole b/src/_nfdos/rootfs/sbin/setconsole new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/setconsole @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/slattach b/src/_nfdos/rootfs/sbin/slattach new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/slattach @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/start-stop-daemon b/src/_nfdos/rootfs/sbin/start-stop-daemon new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/start-stop-daemon @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/sulogin b/src/_nfdos/rootfs/sbin/sulogin new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/sulogin @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/swapoff b/src/_nfdos/rootfs/sbin/swapoff new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/swapoff @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/swapon b/src/_nfdos/rootfs/sbin/swapon new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/swapon @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/switch_root b/src/_nfdos/rootfs/sbin/switch_root new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/switch_root @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/sysctl b/src/_nfdos/rootfs/sbin/sysctl new file mode 120000 index 0000000..7125971 --- /dev/null +++ 
b/src/_nfdos/rootfs/sbin/sysctl @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/syslogd b/src/_nfdos/rootfs/sbin/syslogd new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/syslogd @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/tunctl b/src/_nfdos/rootfs/sbin/tunctl new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/tunctl @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/udhcpc b/src/_nfdos/rootfs/sbin/udhcpc new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/udhcpc @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/uevent b/src/_nfdos/rootfs/sbin/uevent new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/uevent @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/vconfig b/src/_nfdos/rootfs/sbin/vconfig new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/vconfig @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/watchdog b/src/_nfdos/rootfs/sbin/watchdog new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/watchdog @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/sbin/zcip b/src/_nfdos/rootfs/sbin/zcip new file mode 120000 index 0000000..7125971 --- /dev/null +++ b/src/_nfdos/rootfs/sbin/zcip @@ -0,0 +1 @@ +../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/[ b/src/_nfdos/rootfs/usr/bin/[ new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/[ @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/[[ b/src/_nfdos/rootfs/usr/bin/[[ new file mode 
120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/[[ @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/ascii b/src/_nfdos/rootfs/usr/bin/ascii new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/ascii @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/awk b/src/_nfdos/rootfs/usr/bin/awk new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/awk @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/basename b/src/_nfdos/rootfs/usr/bin/basename new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/basename @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/bc b/src/_nfdos/rootfs/usr/bin/bc new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/bc @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/beep b/src/_nfdos/rootfs/usr/bin/beep new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/beep @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/blkdiscard b/src/_nfdos/rootfs/usr/bin/blkdiscard new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/blkdiscard @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/bunzip2 b/src/_nfdos/rootfs/usr/bin/bunzip2 new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/bunzip2 @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/bzcat b/src/_nfdos/rootfs/usr/bin/bzcat new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/bzcat @@ -0,0 +1 @@ 
+../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/bzip2 b/src/_nfdos/rootfs/usr/bin/bzip2 new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/bzip2 @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/cal b/src/_nfdos/rootfs/usr/bin/cal new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/cal @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/chpst b/src/_nfdos/rootfs/usr/bin/chpst new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/chpst @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/chrt b/src/_nfdos/rootfs/usr/bin/chrt new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/chrt @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/chvt b/src/_nfdos/rootfs/usr/bin/chvt new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/chvt @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/cksum b/src/_nfdos/rootfs/usr/bin/cksum new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/cksum @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/clear b/src/_nfdos/rootfs/usr/bin/clear new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/clear @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/cmp b/src/_nfdos/rootfs/usr/bin/cmp new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/cmp @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/comm b/src/_nfdos/rootfs/usr/bin/comm new 
file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/comm @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/crc32 b/src/_nfdos/rootfs/usr/bin/crc32 new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/crc32 @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/crontab b/src/_nfdos/rootfs/usr/bin/crontab new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/crontab @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/cryptpw b/src/_nfdos/rootfs/usr/bin/cryptpw new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/cryptpw @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/cut b/src/_nfdos/rootfs/usr/bin/cut new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/cut @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/dc b/src/_nfdos/rootfs/usr/bin/dc new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/dc @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/deallocvt b/src/_nfdos/rootfs/usr/bin/deallocvt new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/deallocvt @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/diff b/src/_nfdos/rootfs/usr/bin/diff new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/diff @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/dirname b/src/_nfdos/rootfs/usr/bin/dirname new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/dirname @@ -0,0 
+1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/dos2unix b/src/_nfdos/rootfs/usr/bin/dos2unix new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/dos2unix @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/dpkg b/src/_nfdos/rootfs/usr/bin/dpkg new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/dpkg @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/dpkg-deb b/src/_nfdos/rootfs/usr/bin/dpkg-deb new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/dpkg-deb @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/du b/src/_nfdos/rootfs/usr/bin/du new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/du @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/dumpleases b/src/_nfdos/rootfs/usr/bin/dumpleases new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/dumpleases @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/eject b/src/_nfdos/rootfs/usr/bin/eject new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/eject @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/env b/src/_nfdos/rootfs/usr/bin/env new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/env @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/envdir b/src/_nfdos/rootfs/usr/bin/envdir new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/envdir @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git 
a/src/_nfdos/rootfs/usr/bin/envuidgid b/src/_nfdos/rootfs/usr/bin/envuidgid new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/envuidgid @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/expand b/src/_nfdos/rootfs/usr/bin/expand new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/expand @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/expr b/src/_nfdos/rootfs/usr/bin/expr new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/expr @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/factor b/src/_nfdos/rootfs/usr/bin/factor new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/factor @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/fallocate b/src/_nfdos/rootfs/usr/bin/fallocate new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/fallocate @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/fgconsole b/src/_nfdos/rootfs/usr/bin/fgconsole new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/fgconsole @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/find b/src/_nfdos/rootfs/usr/bin/find new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/find @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/flock b/src/_nfdos/rootfs/usr/bin/flock new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/flock @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/fold b/src/_nfdos/rootfs/usr/bin/fold new file 
mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/fold @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/free b/src/_nfdos/rootfs/usr/bin/free new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/free @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/ftpget b/src/_nfdos/rootfs/usr/bin/ftpget new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/ftpget @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/ftpput b/src/_nfdos/rootfs/usr/bin/ftpput new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/ftpput @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/fuser b/src/_nfdos/rootfs/usr/bin/fuser new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/fuser @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/getfattr b/src/_nfdos/rootfs/usr/bin/getfattr new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/getfattr @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/groups b/src/_nfdos/rootfs/usr/bin/groups new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/groups @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/hd b/src/_nfdos/rootfs/usr/bin/hd new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/hd @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/head b/src/_nfdos/rootfs/usr/bin/head new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/head @@ -0,0 +1 @@ 
+../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/hexdump b/src/_nfdos/rootfs/usr/bin/hexdump new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/hexdump @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/hexedit b/src/_nfdos/rootfs/usr/bin/hexedit new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/hexedit @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/hostid b/src/_nfdos/rootfs/usr/bin/hostid new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/hostid @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/id b/src/_nfdos/rootfs/usr/bin/id new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/id @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/install b/src/_nfdos/rootfs/usr/bin/install new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/install @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/ipcrm b/src/_nfdos/rootfs/usr/bin/ipcrm new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/ipcrm @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/ipcs b/src/_nfdos/rootfs/usr/bin/ipcs new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/ipcs @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/killall b/src/_nfdos/rootfs/usr/bin/killall new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/killall @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/last 
b/src/_nfdos/rootfs/usr/bin/last new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/last @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/less b/src/_nfdos/rootfs/usr/bin/less new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/less @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/logger b/src/_nfdos/rootfs/usr/bin/logger new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/logger @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/logname b/src/_nfdos/rootfs/usr/bin/logname new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/logname @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/lpq b/src/_nfdos/rootfs/usr/bin/lpq new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/lpq @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/lpr b/src/_nfdos/rootfs/usr/bin/lpr new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/lpr @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/lsof b/src/_nfdos/rootfs/usr/bin/lsof new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/lsof @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/lspci b/src/_nfdos/rootfs/usr/bin/lspci new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/lspci @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/lsscsi b/src/_nfdos/rootfs/usr/bin/lsscsi new file mode 120000 index 0000000..f948f1a --- /dev/null +++ 
b/src/_nfdos/rootfs/usr/bin/lsscsi @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/lsusb b/src/_nfdos/rootfs/usr/bin/lsusb new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/lsusb @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/lzcat b/src/_nfdos/rootfs/usr/bin/lzcat new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/lzcat @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/lzma b/src/_nfdos/rootfs/usr/bin/lzma new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/lzma @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/man b/src/_nfdos/rootfs/usr/bin/man new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/man @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/md5sum b/src/_nfdos/rootfs/usr/bin/md5sum new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/md5sum @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/mesg b/src/_nfdos/rootfs/usr/bin/mesg new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/mesg @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/microcom b/src/_nfdos/rootfs/usr/bin/microcom new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/microcom @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/mkfifo b/src/_nfdos/rootfs/usr/bin/mkfifo new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/mkfifo @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git 
a/src/_nfdos/rootfs/usr/bin/mkpasswd b/src/_nfdos/rootfs/usr/bin/mkpasswd new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/mkpasswd @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/nc b/src/_nfdos/rootfs/usr/bin/nc new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/nc @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/nl b/src/_nfdos/rootfs/usr/bin/nl new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/nl @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/nmeter b/src/_nfdos/rootfs/usr/bin/nmeter new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/nmeter @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/nohup b/src/_nfdos/rootfs/usr/bin/nohup new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/nohup @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/nproc b/src/_nfdos/rootfs/usr/bin/nproc new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/nproc @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/nsenter b/src/_nfdos/rootfs/usr/bin/nsenter new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/nsenter @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/nslookup b/src/_nfdos/rootfs/usr/bin/nslookup new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/nslookup @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/od b/src/_nfdos/rootfs/usr/bin/od new file mode 120000 index 
0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/od @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/openvt b/src/_nfdos/rootfs/usr/bin/openvt new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/openvt @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/passwd b/src/_nfdos/rootfs/usr/bin/passwd new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/passwd @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/paste b/src/_nfdos/rootfs/usr/bin/paste new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/paste @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/patch b/src/_nfdos/rootfs/usr/bin/patch new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/patch @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/pgrep b/src/_nfdos/rootfs/usr/bin/pgrep new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/pgrep @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/pkill b/src/_nfdos/rootfs/usr/bin/pkill new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/pkill @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/pmap b/src/_nfdos/rootfs/usr/bin/pmap new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/pmap @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/printf b/src/_nfdos/rootfs/usr/bin/printf new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/printf @@ -0,0 +1 @@ +../../bin/busybox \ No 
newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/pscan b/src/_nfdos/rootfs/usr/bin/pscan new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/pscan @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/pstree b/src/_nfdos/rootfs/usr/bin/pstree new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/pstree @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/pwdx b/src/_nfdos/rootfs/usr/bin/pwdx new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/pwdx @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/python3 b/src/_nfdos/rootfs/usr/bin/python3 new file mode 100755 index 0000000..74e5744 Binary files /dev/null and b/src/_nfdos/rootfs/usr/bin/python3 differ diff --git a/src/_nfdos/rootfs/usr/bin/readlink b/src/_nfdos/rootfs/usr/bin/readlink new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/readlink @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/realpath b/src/_nfdos/rootfs/usr/bin/realpath new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/realpath @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/renice b/src/_nfdos/rootfs/usr/bin/renice new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/renice @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/reset b/src/_nfdos/rootfs/usr/bin/reset new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/reset @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/resize b/src/_nfdos/rootfs/usr/bin/resize new file mode 120000 
index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/resize @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/rpm2cpio b/src/_nfdos/rootfs/usr/bin/rpm2cpio new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/rpm2cpio @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/runsv b/src/_nfdos/rootfs/usr/bin/runsv new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/runsv @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/runsvdir b/src/_nfdos/rootfs/usr/bin/runsvdir new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/runsvdir @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/rx b/src/_nfdos/rootfs/usr/bin/rx new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/rx @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/script b/src/_nfdos/rootfs/usr/bin/script new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/script @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/seq b/src/_nfdos/rootfs/usr/bin/seq new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/seq @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/setfattr b/src/_nfdos/rootfs/usr/bin/setfattr new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/setfattr @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/setkeycodes b/src/_nfdos/rootfs/usr/bin/setkeycodes new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/setkeycodes @@ 
-0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/setsid b/src/_nfdos/rootfs/usr/bin/setsid new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/setsid @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/setuidgid b/src/_nfdos/rootfs/usr/bin/setuidgid new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/setuidgid @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/sha1sum b/src/_nfdos/rootfs/usr/bin/sha1sum new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/sha1sum @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/sha256sum b/src/_nfdos/rootfs/usr/bin/sha256sum new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/sha256sum @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/sha384sum b/src/_nfdos/rootfs/usr/bin/sha384sum new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/sha384sum @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/sha3sum b/src/_nfdos/rootfs/usr/bin/sha3sum new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/sha3sum @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/sha512sum b/src/_nfdos/rootfs/usr/bin/sha512sum new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/sha512sum @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/showkey b/src/_nfdos/rootfs/usr/bin/showkey new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/showkey @@ -0,0 +1 @@ +../../bin/busybox \ No newline 
at end of file diff --git a/src/_nfdos/rootfs/usr/bin/shred b/src/_nfdos/rootfs/usr/bin/shred new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/shred @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/shuf b/src/_nfdos/rootfs/usr/bin/shuf new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/shuf @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/smemcap b/src/_nfdos/rootfs/usr/bin/smemcap new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/smemcap @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/softlimit b/src/_nfdos/rootfs/usr/bin/softlimit new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/softlimit @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/sort b/src/_nfdos/rootfs/usr/bin/sort new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/sort @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/split b/src/_nfdos/rootfs/usr/bin/split new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/split @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/ssl_client b/src/_nfdos/rootfs/usr/bin/ssl_client new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/ssl_client @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/strings b/src/_nfdos/rootfs/usr/bin/strings new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/strings @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/sum 
b/src/_nfdos/rootfs/usr/bin/sum new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/sum @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/sv b/src/_nfdos/rootfs/usr/bin/sv new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/sv @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/svc b/src/_nfdos/rootfs/usr/bin/svc new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/svc @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/svok b/src/_nfdos/rootfs/usr/bin/svok new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/svok @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/tac b/src/_nfdos/rootfs/usr/bin/tac new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/tac @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/tail b/src/_nfdos/rootfs/usr/bin/tail new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/tail @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/taskset b/src/_nfdos/rootfs/usr/bin/taskset new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/taskset @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/tcpsvd b/src/_nfdos/rootfs/usr/bin/tcpsvd new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/tcpsvd @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/tee b/src/_nfdos/rootfs/usr/bin/tee new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/tee @@ -0,0 
+1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/telnet b/src/_nfdos/rootfs/usr/bin/telnet new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/telnet @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/test b/src/_nfdos/rootfs/usr/bin/test new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/test @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/tftp b/src/_nfdos/rootfs/usr/bin/tftp new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/tftp @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/time b/src/_nfdos/rootfs/usr/bin/time new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/time @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/timeout b/src/_nfdos/rootfs/usr/bin/timeout new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/timeout @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/top b/src/_nfdos/rootfs/usr/bin/top new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/top @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/tr b/src/_nfdos/rootfs/usr/bin/tr new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/tr @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/traceroute b/src/_nfdos/rootfs/usr/bin/traceroute new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/traceroute @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/traceroute6 
b/src/_nfdos/rootfs/usr/bin/traceroute6 new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/traceroute6 @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/tree b/src/_nfdos/rootfs/usr/bin/tree new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/tree @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/truncate b/src/_nfdos/rootfs/usr/bin/truncate new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/truncate @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/ts b/src/_nfdos/rootfs/usr/bin/ts new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/ts @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/tsort b/src/_nfdos/rootfs/usr/bin/tsort new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/tsort @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/tty b/src/_nfdos/rootfs/usr/bin/tty new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/tty @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/ttysize b/src/_nfdos/rootfs/usr/bin/ttysize new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/ttysize @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/udhcpc6 b/src/_nfdos/rootfs/usr/bin/udhcpc6 new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/udhcpc6 @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/udpsvd b/src/_nfdos/rootfs/usr/bin/udpsvd new file mode 120000 index 0000000..f948f1a --- /dev/null 
+++ b/src/_nfdos/rootfs/usr/bin/udpsvd @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/unexpand b/src/_nfdos/rootfs/usr/bin/unexpand new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/unexpand @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/uniq b/src/_nfdos/rootfs/usr/bin/uniq new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/uniq @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/unix2dos b/src/_nfdos/rootfs/usr/bin/unix2dos new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/unix2dos @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/unlink b/src/_nfdos/rootfs/usr/bin/unlink new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/unlink @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/unlzma b/src/_nfdos/rootfs/usr/bin/unlzma new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/unlzma @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/unshare b/src/_nfdos/rootfs/usr/bin/unshare new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/unshare @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/unxz b/src/_nfdos/rootfs/usr/bin/unxz new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/unxz @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/unzip b/src/_nfdos/rootfs/usr/bin/unzip new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/unzip @@ -0,0 +1 @@ +../../bin/busybox \ No newline at 
end of file diff --git a/src/_nfdos/rootfs/usr/bin/uptime b/src/_nfdos/rootfs/usr/bin/uptime new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/uptime @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/users b/src/_nfdos/rootfs/usr/bin/users new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/users @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/uudecode b/src/_nfdos/rootfs/usr/bin/uudecode new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/uudecode @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/uuencode b/src/_nfdos/rootfs/usr/bin/uuencode new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/uuencode @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/vlock b/src/_nfdos/rootfs/usr/bin/vlock new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/vlock @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/volname b/src/_nfdos/rootfs/usr/bin/volname new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/volname @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/w b/src/_nfdos/rootfs/usr/bin/w new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/w @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/wall b/src/_nfdos/rootfs/usr/bin/wall new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/wall @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/wc b/src/_nfdos/rootfs/usr/bin/wc new file mode 
120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/wc @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/wget b/src/_nfdos/rootfs/usr/bin/wget new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/wget @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/which b/src/_nfdos/rootfs/usr/bin/which new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/which @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/who b/src/_nfdos/rootfs/usr/bin/who new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/who @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/whoami b/src/_nfdos/rootfs/usr/bin/whoami new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/whoami @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/whois b/src/_nfdos/rootfs/usr/bin/whois new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/whois @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/xargs b/src/_nfdos/rootfs/usr/bin/xargs new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/xargs @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/xxd b/src/_nfdos/rootfs/usr/bin/xxd new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/xxd @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/xz b/src/_nfdos/rootfs/usr/bin/xz new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/xz @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end 
of file diff --git a/src/_nfdos/rootfs/usr/bin/xzcat b/src/_nfdos/rootfs/usr/bin/xzcat new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/xzcat @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/bin/yes b/src/_nfdos/rootfs/usr/bin/yes new file mode 120000 index 0000000..f948f1a --- /dev/null +++ b/src/_nfdos/rootfs/usr/bin/yes @@ -0,0 +1 @@ +../../bin/busybox \ No newline at end of file diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/__future__.py b/src/_nfdos/rootfs/usr/lib/python3.13/__future__.py new file mode 100644 index 0000000..39720a5 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/__future__.py @@ -0,0 +1,147 @@ +"""Record of phased-in incompatible language changes. + +Each line is of the form: + + FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," + CompilerFlag ")" + +where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples +of the same form as sys.version_info: + + (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int + PY_MINOR_VERSION, # the 1; an int + PY_MICRO_VERSION, # the 0; an int + PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string + PY_RELEASE_SERIAL # the 3; an int + ) + +OptionalRelease records the first release in which + + from __future__ import FeatureName + +was accepted. + +In the case of MandatoryReleases that have not yet occurred, +MandatoryRelease predicts the release in which the feature will become part +of the language. + +Else MandatoryRelease records when the feature became part of the language; +in releases at or after that, modules no longer need + + from __future__ import FeatureName + +to use the feature in question, but may continue to use such imports. + +MandatoryRelease may also be None, meaning that a planned feature got +dropped or that the release version is undetermined. 
+ +Instances of class _Feature have two corresponding methods, +.getOptionalRelease() and .getMandatoryRelease(). + +CompilerFlag is the (bitfield) flag that should be passed in the fourth +argument to the builtin function compile() to enable the feature in +dynamically compiled code. This flag is stored in the .compiler_flag +attribute on _Future instances. These values must match the appropriate +#defines of CO_xxx flags in Include/cpython/compile.h. + +No feature line is ever to be deleted from this file. +""" + +all_feature_names = [ + "nested_scopes", + "generators", + "division", + "absolute_import", + "with_statement", + "print_function", + "unicode_literals", + "barry_as_FLUFL", + "generator_stop", + "annotations", +] + +__all__ = ["all_feature_names"] + all_feature_names + +# The CO_xxx symbols are defined here under the same names defined in +# code.h and used by compile.h, so that an editor search will find them here. +# However, they're not exported in __all__, because they don't really belong to +# this module. +CO_NESTED = 0x0010 # nested_scopes +CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) +CO_FUTURE_DIVISION = 0x20000 # division +CO_FUTURE_ABSOLUTE_IMPORT = 0x40000 # perform absolute imports by default +CO_FUTURE_WITH_STATEMENT = 0x80000 # with statement +CO_FUTURE_PRINT_FUNCTION = 0x100000 # print function +CO_FUTURE_UNICODE_LITERALS = 0x200000 # unicode string literals +CO_FUTURE_BARRY_AS_BDFL = 0x400000 +CO_FUTURE_GENERATOR_STOP = 0x800000 # StopIteration becomes RuntimeError in generators +CO_FUTURE_ANNOTATIONS = 0x1000000 # annotations become strings at runtime + + +class _Feature: + + def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): + self.optional = optionalRelease + self.mandatory = mandatoryRelease + self.compiler_flag = compiler_flag + + def getOptionalRelease(self): + """Return first release in which this feature was recognized. + + This is a 5-tuple, of the same form as sys.version_info. 
+ """ + return self.optional + + def getMandatoryRelease(self): + """Return release in which this feature will become mandatory. + + This is a 5-tuple, of the same form as sys.version_info, or, if + the feature was dropped, or the release date is undetermined, is None. + """ + return self.mandatory + + def __repr__(self): + return "_Feature" + repr((self.optional, + self.mandatory, + self.compiler_flag)) + + +nested_scopes = _Feature((2, 1, 0, "beta", 1), + (2, 2, 0, "alpha", 0), + CO_NESTED) + +generators = _Feature((2, 2, 0, "alpha", 1), + (2, 3, 0, "final", 0), + CO_GENERATOR_ALLOWED) + +division = _Feature((2, 2, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_DIVISION) + +absolute_import = _Feature((2, 5, 0, "alpha", 1), + (3, 0, 0, "alpha", 0), + CO_FUTURE_ABSOLUTE_IMPORT) + +with_statement = _Feature((2, 5, 0, "alpha", 1), + (2, 6, 0, "alpha", 0), + CO_FUTURE_WITH_STATEMENT) + +print_function = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_PRINT_FUNCTION) + +unicode_literals = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_UNICODE_LITERALS) + +barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2), + (4, 0, 0, "alpha", 0), + CO_FUTURE_BARRY_AS_BDFL) + +generator_stop = _Feature((3, 5, 0, "beta", 1), + (3, 7, 0, "alpha", 0), + CO_FUTURE_GENERATOR_STOP) + +annotations = _Feature((3, 7, 0, "beta", 1), + None, + CO_FUTURE_ANNOTATIONS) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/__hello__.py b/src/_nfdos/rootfs/usr/lib/python3.13/__hello__.py new file mode 100644 index 0000000..c09d6a4 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/__hello__.py @@ -0,0 +1,16 @@ +initialized = True + +class TestFrozenUtf8_1: + """\u00b6""" + +class TestFrozenUtf8_2: + """\u03c0""" + +class TestFrozenUtf8_4: + """\U0001f600""" + +def main(): + print("Hello world!") + +if __name__ == '__main__': + main() diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/__init__.py 
b/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/__init__.py new file mode 100644 index 0000000..d37bd27 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/__init__.py @@ -0,0 +1,7 @@ +initialized = True + +def main(): + print("Hello world!") + +if __name__ == '__main__': + main() diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/ham/__init__.py b/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/ham/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/ham/eggs.py b/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/ham/eggs.py new file mode 100644 index 0000000..e69de29 diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/spam.py b/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/spam.py new file mode 100644 index 0000000..d37bd27 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/__phello__/spam.py @@ -0,0 +1,7 @@ +initialized = True + +def main(): + print("Hello world!") + +if __name__ == '__main__': + main() diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/_aix_support.py b/src/_nfdos/rootfs/usr/lib/python3.13/_aix_support.py new file mode 100644 index 0000000..dadc75c --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/_aix_support.py @@ -0,0 +1,108 @@ +"""Shared AIX support functions.""" + +import sys +import sysconfig + + +# Taken from _osx_support _read_output function +def _read_cmd_output(commandstring, capture_stderr=False): + """Output from successful command execution or None""" + # Similar to os.popen(commandstring, "r").read(), + # but without actually using os.popen because that + # function is not usable during python bootstrap. 
+ import os + import contextlib + fp = open("/tmp/_aix_support.%s"%( + os.getpid(),), "w+b") + + with contextlib.closing(fp) as fp: + if capture_stderr: + cmd = "%s >'%s' 2>&1" % (commandstring, fp.name) + else: + cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + return fp.read() if not os.system(cmd) else None + + +def _aix_tag(vrtl, bd): + # type: (List[int], int) -> str + # Infer the ABI bitwidth from maxsize (assuming 64 bit as the default) + _sz = 32 if sys.maxsize == (2**31-1) else 64 + _bd = bd if bd != 0 else 9988 + # vrtl[version, release, technology_level] + return "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(vrtl[0], vrtl[1], vrtl[2], _bd, _sz) + + +# extract version, release and technology level from a VRMF string +def _aix_vrtl(vrmf): + # type: (str) -> List[int] + v, r, tl = vrmf.split(".")[:3] + return [int(v[-1]), int(r), int(tl)] + + +def _aix_bos_rte(): + # type: () -> Tuple[str, int] + """ + Return a Tuple[str, int] e.g., ['7.1.4.34', 1806] + The fileset bos.rte represents the current AIX run-time level. It's VRMF and + builddate reflect the current ABI levels of the runtime environment. + If no builddate is found give a value that will satisfy pep425 related queries + """ + # All AIX systems to have lslpp installed in this location + # subprocess may not be available during python bootstrap + try: + import subprocess + out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.rte"]) + except ImportError: + out = _read_cmd_output("/usr/bin/lslpp -Lqc bos.rte") + out = out.decode("utf-8") + out = out.strip().split(":") # type: ignore + _bd = int(out[-1]) if out[-1] != '' else 9988 + return (str(out[2]), _bd) + + +def aix_platform(): + # type: () -> str + """ + AIX filesets are identified by four decimal values: V.R.M.F. + V (version) and R (release) can be retrieved using ``uname`` + Since 2007, starting with AIX 5.3 TL7, the M value has been + included with the fileset bos.rte and represents the Technology + Level (TL) of AIX. 
The F (Fix) value also increases, but is not + relevant for comparing releases and binary compatibility. + For binary compatibility the so-called builddate is needed. + Again, the builddate of an AIX release is associated with bos.rte. + AIX ABI compatibility is described as guaranteed at: https://www.ibm.com/\ + support/knowledgecenter/en/ssw_aix_72/install/binary_compatability.html + + For pep425 purposes the AIX platform tag becomes: + "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(v, r, tl, builddate, bitsize) + e.g., "aix-6107-1415-32" for AIX 6.1 TL7 bd 1415, 32-bit + and, "aix-6107-1415-64" for AIX 6.1 TL7 bd 1415, 64-bit + """ + vrmf, bd = _aix_bos_rte() + return _aix_tag(_aix_vrtl(vrmf), bd) + + +# extract vrtl from the BUILD_GNU_TYPE as an int +def _aix_bgt(): + # type: () -> List[int] + gnu_type = sysconfig.get_config_var("BUILD_GNU_TYPE") + if not gnu_type: + raise ValueError("BUILD_GNU_TYPE is not defined") + return _aix_vrtl(vrmf=gnu_type) + + +def aix_buildtag(): + # type: () -> str + """ + Return the platform_tag of the system Python was built on. 
+ """ + # AIX_BUILDDATE is defined by configure with: + # lslpp -Lcq bos.rte | awk -F: '{ print $NF }' + build_date = sysconfig.get_config_var("AIX_BUILDDATE") + try: + build_date = int(build_date) + except (ValueError, TypeError): + raise ValueError(f"AIX_BUILDDATE is not defined or invalid: " + f"{build_date!r}") + return _aix_tag(_aix_bgt(), build_date) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/_android_support.py b/src/_nfdos/rootfs/usr/lib/python3.13/_android_support.py new file mode 100644 index 0000000..a439d03 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/_android_support.py @@ -0,0 +1,185 @@ +import io +import sys +from threading import RLock +from time import sleep, time + +# The maximum length of a log message in bytes, including the level marker and +# tag, is defined as LOGGER_ENTRY_MAX_PAYLOAD at +# https://cs.android.com/android/platform/superproject/+/android-14.0.0_r1:system/logging/liblog/include/log/log.h;l=71. +# Messages longer than this will be truncated by logcat. This limit has already +# been reduced at least once in the history of Android (from 4076 to 4068 between +# API level 23 and 26), so leave some headroom. +MAX_BYTES_PER_WRITE = 4000 + +# UTF-8 uses a maximum of 4 bytes per character, so limiting text writes to this +# size ensures that we can always avoid exceeding MAX_BYTES_PER_WRITE. +# However, if the actual number of bytes per character is smaller than that, +# then we may still join multiple consecutive text writes into binary +# writes containing a larger number of characters. +MAX_CHARS_PER_WRITE = MAX_BYTES_PER_WRITE // 4 + + +# When embedded in an app on current versions of Android, there's no easy way to +# monitor the C-level stdout and stderr. The testbed comes with a .c file to +# redirect them to the system log using a pipe, but that wouldn't be convenient +# or appropriate for all apps. So we redirect at the Python level instead. 
+def init_streams(android_log_write, stdout_prio, stderr_prio): + if sys.executable: + return # Not embedded in an app. + + global logcat + logcat = Logcat(android_log_write) + sys.stdout = TextLogStream(stdout_prio, "python.stdout", sys.stdout) + sys.stderr = TextLogStream(stderr_prio, "python.stderr", sys.stderr) + + +class TextLogStream(io.TextIOWrapper): + def __init__(self, prio, tag, original=None, **kwargs): + # Respect the -u option. + if original: + kwargs.setdefault("write_through", original.write_through) + fileno = original.fileno() + else: + fileno = None + + # The default is surrogateescape for stdout and backslashreplace for + # stderr, but in the context of an Android log, readability is more + # important than reversibility. + kwargs.setdefault("encoding", "UTF-8") + kwargs.setdefault("errors", "backslashreplace") + + super().__init__(BinaryLogStream(prio, tag, fileno), **kwargs) + self._lock = RLock() + self._pending_bytes = [] + self._pending_bytes_count = 0 + + def __repr__(self): + return f"" + + def write(self, s): + if not isinstance(s, str): + raise TypeError( + f"write() argument must be str, not {type(s).__name__}") + + # In case `s` is a str subclass that writes itself to stdout or stderr + # when we call its methods, convert it to an actual str. + s = str.__str__(s) + + # We want to emit one log message per line wherever possible, so split + # the string into lines first. Note that "".splitlines() == [], so + # nothing will be logged for an empty string. + with self._lock: + for line in s.splitlines(keepends=True): + while line: + chunk = line[:MAX_CHARS_PER_WRITE] + line = line[MAX_CHARS_PER_WRITE:] + self._write_chunk(chunk) + + return len(s) + + # The size and behavior of TextIOWrapper's buffer is not part of its public + # API, so we handle buffering ourselves to avoid truncation. 
+ def _write_chunk(self, s): + b = s.encode(self.encoding, self.errors) + if self._pending_bytes_count + len(b) > MAX_BYTES_PER_WRITE: + self.flush() + + self._pending_bytes.append(b) + self._pending_bytes_count += len(b) + if ( + self.write_through + or b.endswith(b"\n") + or self._pending_bytes_count > MAX_BYTES_PER_WRITE + ): + self.flush() + + def flush(self): + with self._lock: + self.buffer.write(b"".join(self._pending_bytes)) + self._pending_bytes.clear() + self._pending_bytes_count = 0 + + # Since this is a line-based logging system, line buffering cannot be turned + # off, i.e. a newline always causes a flush. + @property + def line_buffering(self): + return True + + +class BinaryLogStream(io.RawIOBase): + def __init__(self, prio, tag, fileno=None): + self.prio = prio + self.tag = tag + self._fileno = fileno + + def __repr__(self): + return f"" + + def writable(self): + return True + + def write(self, b): + if type(b) is not bytes: + try: + b = bytes(memoryview(b)) + except TypeError: + raise TypeError( + f"write() argument must be bytes-like, not {type(b).__name__}" + ) from None + + # Writing an empty string to the stream should have no effect. + if b: + logcat.write(self.prio, self.tag, b) + return len(b) + + # This is needed by the test suite --timeout option, which uses faulthandler. + def fileno(self): + if self._fileno is None: + raise io.UnsupportedOperation("fileno") + return self._fileno + + +# When a large volume of data is written to logcat at once, e.g. when a test +# module fails in --verbose3 mode, there's a risk of overflowing logcat's own +# buffer and losing messages. We avoid this by imposing a rate limit using the +# token bucket algorithm, based on a conservative estimate of how fast `adb +# logcat` can consume data. +MAX_BYTES_PER_SECOND = 1024 * 1024 + +# The logcat buffer size of a device can be determined by running `logcat -g`. 
+# We set the token bucket size to half of the buffer size of our current minimum +# API level, because other things on the system will be producing messages as +# well. +BUCKET_SIZE = 128 * 1024 + +# https://cs.android.com/android/platform/superproject/+/android-14.0.0_r1:system/logging/liblog/include/log/log_read.h;l=39 +PER_MESSAGE_OVERHEAD = 28 + + +class Logcat: + def __init__(self, android_log_write): + self.android_log_write = android_log_write + self._lock = RLock() + self._bucket_level = 0 + self._prev_write_time = time() + + def write(self, prio, tag, message): + # Encode null bytes using "modified UTF-8" to avoid them truncating the + # message. + message = message.replace(b"\x00", b"\xc0\x80") + + with self._lock: + now = time() + self._bucket_level += ( + (now - self._prev_write_time) * MAX_BYTES_PER_SECOND) + + # If the bucket level is still below zero, the clock must have gone + # backwards, so reset it to zero and continue. + self._bucket_level = max(0, min(self._bucket_level, BUCKET_SIZE)) + self._prev_write_time = now + + self._bucket_level -= PER_MESSAGE_OVERHEAD + len(tag) + len(message) + if self._bucket_level < 0: + sleep(-self._bucket_level / MAX_BYTES_PER_SECOND) + + self.android_log_write(prio, tag, message) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/_apple_support.py b/src/_nfdos/rootfs/usr/lib/python3.13/_apple_support.py new file mode 100644 index 0000000..92febdc --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/_apple_support.py @@ -0,0 +1,66 @@ +import io +import sys + + +def init_streams(log_write, stdout_level, stderr_level): + # Redirect stdout and stderr to the Apple system log. This method is + # invoked by init_apple_streams() (initconfig.c) if config->use_system_logger + # is enabled. 
+ sys.stdout = SystemLog(log_write, stdout_level, errors=sys.stderr.errors) + sys.stderr = SystemLog(log_write, stderr_level, errors=sys.stderr.errors) + + +class SystemLog(io.TextIOWrapper): + def __init__(self, log_write, level, **kwargs): + kwargs.setdefault("encoding", "UTF-8") + kwargs.setdefault("line_buffering", True) + super().__init__(LogStream(log_write, level), **kwargs) + + def __repr__(self): + return f"" + + def write(self, s): + if not isinstance(s, str): + raise TypeError( + f"write() argument must be str, not {type(s).__name__}") + + # In case `s` is a str subclass that writes itself to stdout or stderr + # when we call its methods, convert it to an actual str. + s = str.__str__(s) + + # We want to emit one log message per line, so split + # the string before sending it to the superclass. + for line in s.splitlines(keepends=True): + super().write(line) + + return len(s) + + +class LogStream(io.RawIOBase): + def __init__(self, log_write, level): + self.log_write = log_write + self.level = level + + def __repr__(self): + return f"" + + def writable(self): + return True + + def write(self, b): + if type(b) is not bytes: + try: + b = bytes(memoryview(b)) + except TypeError: + raise TypeError( + f"write() argument must be bytes-like, not {type(b).__name__}" + ) from None + + # Writing an empty string to the stream should have no effect. + if b: + # Encode null bytes using "modified UTF-8" to avoid truncating the + # message. This should not affect the return value, as the caller + # may be expecting it to match the length of the input. 
+ self.log_write(self.level, b.replace(b"\x00", b"\xc0\x80")) + + return len(b) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/_ast_unparse.py b/src/_nfdos/rootfs/usr/lib/python3.13/_ast_unparse.py new file mode 100644 index 0000000..16cf56f --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/_ast_unparse.py @@ -0,0 +1,1160 @@ +# This module contains ``ast.unparse()``, defined here +# to improve the import time for the ``ast`` module. +import sys +from _ast import * +from ast import NodeVisitor +from contextlib import contextmanager, nullcontext +from enum import IntEnum, auto, _simple_enum + +# Large float and imaginary literals get turned into infinities in the AST. +# We unparse those infinities to INFSTR. +_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1) + +@_simple_enum(IntEnum) +class _Precedence: + """Precedence table that originated from python grammar.""" + + NAMED_EXPR = auto() # := + TUPLE = auto() # , + YIELD = auto() # 'yield', 'yield from' + TEST = auto() # 'if'-'else', 'lambda' + OR = auto() # 'or' + AND = auto() # 'and' + NOT = auto() # 'not' + CMP = auto() # '<', '>', '==', '>=', '<=', '!=', + # 'in', 'not in', 'is', 'is not' + EXPR = auto() + BOR = EXPR # '|' + BXOR = auto() # '^' + BAND = auto() # '&' + SHIFT = auto() # '<<', '>>' + ARITH = auto() # '+', '-' + TERM = auto() # '*', '@', '/', '%', '//' + FACTOR = auto() # unary '+', '-', '~' + POWER = auto() # '**' + AWAIT = auto() # 'await' + ATOM = auto() + + def next(self): + try: + return self.__class__(self + 1) + except ValueError: + return self + + +_SINGLE_QUOTES = ("'", '"') +_MULTI_QUOTES = ('"""', "'''") +_ALL_QUOTES = (*_SINGLE_QUOTES, *_MULTI_QUOTES) + +class Unparser(NodeVisitor): + """Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarded.""" + + def __init__(self): + self._source = [] + self._precedences = {} + self._type_ignores = {} + self._indent = 0 + self._in_try_star = False + 
self._in_interactive = False + + def interleave(self, inter, f, seq): + """Call f on each item in seq, calling inter() in between.""" + seq = iter(seq) + try: + f(next(seq)) + except StopIteration: + pass + else: + for x in seq: + inter() + f(x) + + def items_view(self, traverser, items): + """Traverse and separate the given *items* with a comma and append it to + the buffer. If *items* is a single item sequence, a trailing comma + will be added.""" + if len(items) == 1: + traverser(items[0]) + self.write(",") + else: + self.interleave(lambda: self.write(", "), traverser, items) + + def maybe_newline(self): + """Adds a newline if it isn't the start of generated source""" + if self._source: + self.write("\n") + + def maybe_semicolon(self): + """Adds a "; " delimiter if it isn't the start of generated source""" + if self._source: + self.write("; ") + + def fill(self, text="", *, allow_semicolon=True): + """Indent a piece of text and append it, according to the current + indentation level, or only delineate with semicolon if applicable""" + if self._in_interactive and not self._indent and allow_semicolon: + self.maybe_semicolon() + self.write(text) + else: + self.maybe_newline() + self.write(" " * self._indent + text) + + def write(self, *text): + """Add new source parts""" + self._source.extend(text) + + @contextmanager + def buffered(self, buffer = None): + if buffer is None: + buffer = [] + + original_source = self._source + self._source = buffer + yield buffer + self._source = original_source + + @contextmanager + def block(self, *, extra = None): + """A context manager for preparing the source for blocks. It adds + the character':', increases the indentation on enter and decreases + the indentation on exit. If *extra* is given, it will be directly + appended after the colon character. 
+ """ + self.write(":") + if extra: + self.write(extra) + self._indent += 1 + yield + self._indent -= 1 + + @contextmanager + def delimit(self, start, end): + """A context manager for preparing the source for expressions. It adds + *start* to the buffer and enters, after exit it adds *end*.""" + + self.write(start) + yield + self.write(end) + + def delimit_if(self, start, end, condition): + if condition: + return self.delimit(start, end) + else: + return nullcontext() + + def require_parens(self, precedence, node): + """Shortcut to adding precedence related parens""" + return self.delimit_if("(", ")", self.get_precedence(node) > precedence) + + def get_precedence(self, node): + return self._precedences.get(node, _Precedence.TEST) + + def set_precedence(self, precedence, *nodes): + for node in nodes: + self._precedences[node] = precedence + + def get_raw_docstring(self, node): + """If a docstring node is found in the body of the *node* parameter, + return that docstring node, None otherwise. 
+ + Logic mirrored from ``_PyAST_GetDocString``.""" + if not isinstance( + node, (AsyncFunctionDef, FunctionDef, ClassDef, Module) + ) or len(node.body) < 1: + return None + node = node.body[0] + if not isinstance(node, Expr): + return None + node = node.value + if isinstance(node, Constant) and isinstance(node.value, str): + return node + + def get_type_comment(self, node): + comment = self._type_ignores.get(node.lineno) or node.type_comment + if comment is not None: + return f" # type: {comment}" + + def traverse(self, node): + if isinstance(node, list): + for item in node: + self.traverse(item) + else: + super().visit(node) + + # Note: as visit() resets the output text, do NOT rely on + # NodeVisitor.generic_visit to handle any nodes (as it calls back in to + # the subclass visit() method, which resets self._source to an empty list) + def visit(self, node): + """Outputs a source code string that, if converted back to an ast + (using ast.parse) will generate an AST equivalent to *node*""" + self._source = [] + self.traverse(node) + return "".join(self._source) + + def _write_docstring_and_traverse_body(self, node): + if (docstring := self.get_raw_docstring(node)): + self._write_docstring(docstring) + self.traverse(node.body[1:]) + else: + self.traverse(node.body) + + def visit_Module(self, node): + self._type_ignores = { + ignore.lineno: f"ignore{ignore.tag}" + for ignore in node.type_ignores + } + try: + self._write_docstring_and_traverse_body(node) + finally: + self._type_ignores.clear() + + def visit_Interactive(self, node): + self._in_interactive = True + try: + self._write_docstring_and_traverse_body(node) + finally: + self._in_interactive = False + + def visit_FunctionType(self, node): + with self.delimit("(", ")"): + self.interleave( + lambda: self.write(", "), self.traverse, node.argtypes + ) + + self.write(" -> ") + self.traverse(node.returns) + + def visit_Expr(self, node): + self.fill() + self.set_precedence(_Precedence.YIELD, node.value) + 
self.traverse(node.value) + + def visit_NamedExpr(self, node): + with self.require_parens(_Precedence.NAMED_EXPR, node): + self.set_precedence(_Precedence.ATOM, node.target, node.value) + self.traverse(node.target) + self.write(" := ") + self.traverse(node.value) + + def visit_Import(self, node): + self.fill("import ") + self.interleave(lambda: self.write(", "), self.traverse, node.names) + + def visit_ImportFrom(self, node): + self.fill("from ") + self.write("." * (node.level or 0)) + if node.module: + self.write(node.module) + self.write(" import ") + self.interleave(lambda: self.write(", "), self.traverse, node.names) + + def visit_Assign(self, node): + self.fill() + for target in node.targets: + self.set_precedence(_Precedence.TUPLE, target) + self.traverse(target) + self.write(" = ") + self.traverse(node.value) + if type_comment := self.get_type_comment(node): + self.write(type_comment) + + def visit_AugAssign(self, node): + self.fill() + self.traverse(node.target) + self.write(" " + self.binop[node.op.__class__.__name__] + "= ") + self.traverse(node.value) + + def visit_AnnAssign(self, node): + self.fill() + with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)): + self.traverse(node.target) + self.write(": ") + self.traverse(node.annotation) + if node.value: + self.write(" = ") + self.traverse(node.value) + + def visit_Return(self, node): + self.fill("return") + if node.value: + self.write(" ") + self.traverse(node.value) + + def visit_Pass(self, node): + self.fill("pass") + + def visit_Break(self, node): + self.fill("break") + + def visit_Continue(self, node): + self.fill("continue") + + def visit_Delete(self, node): + self.fill("del ") + self.interleave(lambda: self.write(", "), self.traverse, node.targets) + + def visit_Assert(self, node): + self.fill("assert ") + self.traverse(node.test) + if node.msg: + self.write(", ") + self.traverse(node.msg) + + def visit_Global(self, node): + self.fill("global ") + self.interleave(lambda: 
self.write(", "), self.write, node.names) + + def visit_Nonlocal(self, node): + self.fill("nonlocal ") + self.interleave(lambda: self.write(", "), self.write, node.names) + + def visit_Await(self, node): + with self.require_parens(_Precedence.AWAIT, node): + self.write("await") + if node.value: + self.write(" ") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + + def visit_Yield(self, node): + with self.require_parens(_Precedence.YIELD, node): + self.write("yield") + if node.value: + self.write(" ") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + + def visit_YieldFrom(self, node): + with self.require_parens(_Precedence.YIELD, node): + self.write("yield from ") + if not node.value: + raise ValueError("Node can't be used without a value attribute.") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + + def visit_Raise(self, node): + self.fill("raise") + if not node.exc: + if node.cause: + raise ValueError(f"Node can't use cause without an exception.") + return + self.write(" ") + self.traverse(node.exc) + if node.cause: + self.write(" from ") + self.traverse(node.cause) + + def do_visit_try(self, node): + self.fill("try", allow_semicolon=False) + with self.block(): + self.traverse(node.body) + for ex in node.handlers: + self.traverse(ex) + if node.orelse: + self.fill("else", allow_semicolon=False) + with self.block(): + self.traverse(node.orelse) + if node.finalbody: + self.fill("finally", allow_semicolon=False) + with self.block(): + self.traverse(node.finalbody) + + def visit_Try(self, node): + prev_in_try_star = self._in_try_star + try: + self._in_try_star = False + self.do_visit_try(node) + finally: + self._in_try_star = prev_in_try_star + + def visit_TryStar(self, node): + prev_in_try_star = self._in_try_star + try: + self._in_try_star = True + self.do_visit_try(node) + finally: + self._in_try_star = prev_in_try_star + + def visit_ExceptHandler(self, node): + 
self.fill("except*" if self._in_try_star else "except", allow_semicolon=False) + if node.type: + self.write(" ") + self.traverse(node.type) + if node.name: + self.write(" as ") + self.write(node.name) + with self.block(): + self.traverse(node.body) + + def visit_ClassDef(self, node): + self.maybe_newline() + for deco in node.decorator_list: + self.fill("@", allow_semicolon=False) + self.traverse(deco) + self.fill("class " + node.name, allow_semicolon=False) + if hasattr(node, "type_params"): + self._type_params_helper(node.type_params) + with self.delimit_if("(", ")", condition = node.bases or node.keywords): + comma = False + for e in node.bases: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + for e in node.keywords: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + + with self.block(): + self._write_docstring_and_traverse_body(node) + + def visit_FunctionDef(self, node): + self._function_helper(node, "def") + + def visit_AsyncFunctionDef(self, node): + self._function_helper(node, "async def") + + def _function_helper(self, node, fill_suffix): + self.maybe_newline() + for deco in node.decorator_list: + self.fill("@", allow_semicolon=False) + self.traverse(deco) + def_str = fill_suffix + " " + node.name + self.fill(def_str, allow_semicolon=False) + if hasattr(node, "type_params"): + self._type_params_helper(node.type_params) + with self.delimit("(", ")"): + self.traverse(node.args) + if node.returns: + self.write(" -> ") + self.traverse(node.returns) + with self.block(extra=self.get_type_comment(node)): + self._write_docstring_and_traverse_body(node) + + def _type_params_helper(self, type_params): + if type_params is not None and len(type_params) > 0: + with self.delimit("[", "]"): + self.interleave(lambda: self.write(", "), self.traverse, type_params) + + def visit_TypeVar(self, node): + self.write(node.name) + if node.bound: + self.write(": ") + self.traverse(node.bound) + if node.default_value: + self.write(" = 
") + self.traverse(node.default_value) + + def visit_TypeVarTuple(self, node): + self.write("*" + node.name) + if node.default_value: + self.write(" = ") + self.traverse(node.default_value) + + def visit_ParamSpec(self, node): + self.write("**" + node.name) + if node.default_value: + self.write(" = ") + self.traverse(node.default_value) + + def visit_TypeAlias(self, node): + self.fill("type ") + self.traverse(node.name) + self._type_params_helper(node.type_params) + self.write(" = ") + self.traverse(node.value) + + def visit_For(self, node): + self._for_helper("for ", node) + + def visit_AsyncFor(self, node): + self._for_helper("async for ", node) + + def _for_helper(self, fill, node): + self.fill(fill, allow_semicolon=False) + self.set_precedence(_Precedence.TUPLE, node.target) + self.traverse(node.target) + self.write(" in ") + self.traverse(node.iter) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) + if node.orelse: + self.fill("else", allow_semicolon=False) + with self.block(): + self.traverse(node.orelse) + + def visit_If(self, node): + self.fill("if ", allow_semicolon=False) + self.traverse(node.test) + with self.block(): + self.traverse(node.body) + # collapse nested ifs into equivalent elifs. 
+ while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If): + node = node.orelse[0] + self.fill("elif ", allow_semicolon=False) + self.traverse(node.test) + with self.block(): + self.traverse(node.body) + # final else + if node.orelse: + self.fill("else", allow_semicolon=False) + with self.block(): + self.traverse(node.orelse) + + def visit_While(self, node): + self.fill("while ", allow_semicolon=False) + self.traverse(node.test) + with self.block(): + self.traverse(node.body) + if node.orelse: + self.fill("else", allow_semicolon=False) + with self.block(): + self.traverse(node.orelse) + + def visit_With(self, node): + self.fill("with ", allow_semicolon=False) + self.interleave(lambda: self.write(", "), self.traverse, node.items) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) + + def visit_AsyncWith(self, node): + self.fill("async with ", allow_semicolon=False) + self.interleave(lambda: self.write(", "), self.traverse, node.items) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) + + def _str_literal_helper( + self, string, *, quote_types=_ALL_QUOTES, escape_special_whitespace=False + ): + """Helper for writing string literals, minimizing escapes. + Returns the tuple (string literal to write, possible quote types). 
+ """ + def escape_char(c): + # \n and \t are non-printable, but we only escape them if + # escape_special_whitespace is True + if not escape_special_whitespace and c in "\n\t": + return c + # Always escape backslashes and other non-printable characters + if c == "\\" or not c.isprintable(): + return c.encode("unicode_escape").decode("ascii") + return c + + escaped_string = "".join(map(escape_char, string)) + possible_quotes = quote_types + if "\n" in escaped_string: + possible_quotes = [q for q in possible_quotes if q in _MULTI_QUOTES] + possible_quotes = [q for q in possible_quotes if q not in escaped_string] + if not possible_quotes: + # If there aren't any possible_quotes, fallback to using repr + # on the original string. Try to use a quote from quote_types, + # e.g., so that we use triple quotes for docstrings. + string = repr(string) + quote = next((q for q in quote_types if string[0] in q), string[0]) + return string[1:-1], [quote] + if escaped_string: + # Sort so that we prefer '''"''' over """\"""" + possible_quotes.sort(key=lambda q: q[0] == escaped_string[-1]) + # If we're using triple quotes and we'd need to escape a final + # quote, escape it + if possible_quotes[0][0] == escaped_string[-1]: + assert len(possible_quotes[0]) == 3 + escaped_string = escaped_string[:-1] + "\\" + escaped_string[-1] + return escaped_string, possible_quotes + + def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES): + """Write string literal value with a best effort attempt to avoid backslashes.""" + string, quote_types = self._str_literal_helper(string, quote_types=quote_types) + quote_type = quote_types[0] + self.write(f"{quote_type}{string}{quote_type}") + + def _ftstring_helper(self, parts): + new_parts = [] + quote_types = list(_ALL_QUOTES) + fallback_to_repr = False + for value, is_constant in parts: + if is_constant: + value, new_quote_types = self._str_literal_helper( + value, + quote_types=quote_types, + escape_special_whitespace=True, + ) + 
if set(new_quote_types).isdisjoint(quote_types): + fallback_to_repr = True + break + quote_types = new_quote_types + else: + if "\n" in value: + quote_types = [q for q in quote_types if q in _MULTI_QUOTES] + assert quote_types + + new_quote_types = [q for q in quote_types if q not in value] + if new_quote_types: + quote_types = new_quote_types + new_parts.append(value) + + if fallback_to_repr: + # If we weren't able to find a quote type that works for all parts + # of the JoinedStr, fallback to using repr and triple single quotes. + quote_types = ["'''"] + new_parts.clear() + for value, is_constant in parts: + if is_constant: + value = repr('"' + value) # force repr to use single quotes + expected_prefix = "'\"" + assert value.startswith(expected_prefix), repr(value) + value = value[len(expected_prefix):-1] + new_parts.append(value) + + value = "".join(new_parts) + quote_type = quote_types[0] + self.write(f"{quote_type}{value}{quote_type}") + + def _write_ftstring(self, values, prefix): + self.write(prefix) + fstring_parts = [] + for value in values: + with self.buffered() as buffer: + self._write_ftstring_inner(value) + fstring_parts.append( + ("".join(buffer), isinstance(value, Constant)) + ) + self._ftstring_helper(fstring_parts) + + def visit_JoinedStr(self, node): + self._write_ftstring(node.values, "f") + + def visit_TemplateStr(self, node): + self._write_ftstring(node.values, "t") + + def _write_ftstring_inner(self, node, is_format_spec=False): + if isinstance(node, JoinedStr): + # for both the f-string itself, and format_spec + for value in node.values: + self._write_ftstring_inner(value, is_format_spec=is_format_spec) + elif isinstance(node, Constant) and isinstance(node.value, str): + value = node.value.replace("{", "{{").replace("}", "}}") + + if is_format_spec: + value = value.replace("\\", "\\\\") + value = value.replace("'", "\\'") + value = value.replace('"', '\\"') + value = value.replace("\n", "\\n") + self.write(value) + elif isinstance(node, 
FormattedValue): + self.visit_FormattedValue(node) + elif isinstance(node, Interpolation): + self.visit_Interpolation(node) + else: + raise ValueError(f"Unexpected node inside JoinedStr, {node!r}") + + def _unparse_interpolation_value(self, inner): + unparser = type(self)() + unparser.set_precedence(_Precedence.TEST.next(), inner) + return unparser.visit(inner) + + def _write_interpolation(self, node, is_interpolation=False): + with self.delimit("{", "}"): + if is_interpolation: + expr = node.str + else: + expr = self._unparse_interpolation_value(node.value) + if expr.startswith("{"): + # Separate pair of opening brackets as "{ {" + self.write(" ") + self.write(expr) + if node.conversion != -1: + self.write(f"!{chr(node.conversion)}") + if node.format_spec: + self.write(":") + self._write_ftstring_inner(node.format_spec, is_format_spec=True) + + def visit_FormattedValue(self, node): + self._write_interpolation(node) + + def visit_Interpolation(self, node): + self._write_interpolation(node, is_interpolation=True) + + def visit_Name(self, node): + self.write(node.id) + + def _write_docstring(self, node): + self.fill(allow_semicolon=False) + if node.kind == "u": + self.write("u") + self._write_str_avoiding_backslashes(node.value, quote_types=_MULTI_QUOTES) + + def _write_constant(self, value): + if isinstance(value, (float, complex)): + # Substitute overflowing decimal literal for AST infinities, + # and inf - inf for NaNs. 
+ self.write( + repr(value) + .replace("inf", _INFSTR) + .replace("nan", f"({_INFSTR}-{_INFSTR})") + ) + else: + self.write(repr(value)) + + def visit_Constant(self, node): + value = node.value + if isinstance(value, tuple): + with self.delimit("(", ")"): + self.items_view(self._write_constant, value) + elif value is ...: + self.write("...") + else: + if node.kind == "u": + self.write("u") + self._write_constant(node.value) + + def visit_List(self, node): + with self.delimit("[", "]"): + self.interleave(lambda: self.write(", "), self.traverse, node.elts) + + def visit_ListComp(self, node): + with self.delimit("[", "]"): + self.traverse(node.elt) + for gen in node.generators: + self.traverse(gen) + + def visit_GeneratorExp(self, node): + with self.delimit("(", ")"): + self.traverse(node.elt) + for gen in node.generators: + self.traverse(gen) + + def visit_SetComp(self, node): + with self.delimit("{", "}"): + self.traverse(node.elt) + for gen in node.generators: + self.traverse(gen) + + def visit_DictComp(self, node): + with self.delimit("{", "}"): + self.traverse(node.key) + self.write(": ") + self.traverse(node.value) + for gen in node.generators: + self.traverse(gen) + + def visit_comprehension(self, node): + if node.is_async: + self.write(" async for ") + else: + self.write(" for ") + self.set_precedence(_Precedence.TUPLE, node.target) + self.traverse(node.target) + self.write(" in ") + self.set_precedence(_Precedence.TEST.next(), node.iter, *node.ifs) + self.traverse(node.iter) + for if_clause in node.ifs: + self.write(" if ") + self.traverse(if_clause) + + def visit_IfExp(self, node): + with self.require_parens(_Precedence.TEST, node): + self.set_precedence(_Precedence.TEST.next(), node.body, node.test) + self.traverse(node.body) + self.write(" if ") + self.traverse(node.test) + self.write(" else ") + self.set_precedence(_Precedence.TEST, node.orelse) + self.traverse(node.orelse) + + def visit_Set(self, node): + if node.elts: + with self.delimit("{", "}"): + 
self.interleave(lambda: self.write(", "), self.traverse, node.elts) + else: + # `{}` would be interpreted as a dictionary literal, and + # `set` might be shadowed. Thus: + self.write('{*()}') + + def visit_Dict(self, node): + def write_key_value_pair(k, v): + self.traverse(k) + self.write(": ") + self.traverse(v) + + def write_item(item): + k, v = item + if k is None: + # for dictionary unpacking operator in dicts {**{'y': 2}} + # see PEP 448 for details + self.write("**") + self.set_precedence(_Precedence.EXPR, v) + self.traverse(v) + else: + write_key_value_pair(k, v) + + with self.delimit("{", "}"): + self.interleave( + lambda: self.write(", "), write_item, zip(node.keys, node.values) + ) + + def visit_Tuple(self, node): + with self.delimit_if( + "(", + ")", + len(node.elts) == 0 or self.get_precedence(node) > _Precedence.TUPLE + ): + self.items_view(self.traverse, node.elts) + + unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"} + unop_precedence = { + "not": _Precedence.NOT, + "~": _Precedence.FACTOR, + "+": _Precedence.FACTOR, + "-": _Precedence.FACTOR, + } + + def visit_UnaryOp(self, node): + operator = self.unop[node.op.__class__.__name__] + operator_precedence = self.unop_precedence[operator] + with self.require_parens(operator_precedence, node): + self.write(operator) + # factor prefixes (+, -, ~) shouldn't be separated + # from the value they belong, (e.g: +1 instead of + 1) + if operator_precedence is not _Precedence.FACTOR: + self.write(" ") + self.set_precedence(operator_precedence, node.operand) + self.traverse(node.operand) + + binop = { + "Add": "+", + "Sub": "-", + "Mult": "*", + "MatMult": "@", + "Div": "/", + "Mod": "%", + "LShift": "<<", + "RShift": ">>", + "BitOr": "|", + "BitXor": "^", + "BitAnd": "&", + "FloorDiv": "//", + "Pow": "**", + } + + binop_precedence = { + "+": _Precedence.ARITH, + "-": _Precedence.ARITH, + "*": _Precedence.TERM, + "@": _Precedence.TERM, + "/": _Precedence.TERM, + "%": _Precedence.TERM, + "<<": 
_Precedence.SHIFT, + ">>": _Precedence.SHIFT, + "|": _Precedence.BOR, + "^": _Precedence.BXOR, + "&": _Precedence.BAND, + "//": _Precedence.TERM, + "**": _Precedence.POWER, + } + + binop_rassoc = frozenset(("**",)) + def visit_BinOp(self, node): + operator = self.binop[node.op.__class__.__name__] + operator_precedence = self.binop_precedence[operator] + with self.require_parens(operator_precedence, node): + if operator in self.binop_rassoc: + left_precedence = operator_precedence.next() + right_precedence = operator_precedence + else: + left_precedence = operator_precedence + right_precedence = operator_precedence.next() + + self.set_precedence(left_precedence, node.left) + self.traverse(node.left) + self.write(f" {operator} ") + self.set_precedence(right_precedence, node.right) + self.traverse(node.right) + + cmpops = { + "Eq": "==", + "NotEq": "!=", + "Lt": "<", + "LtE": "<=", + "Gt": ">", + "GtE": ">=", + "Is": "is", + "IsNot": "is not", + "In": "in", + "NotIn": "not in", + } + + def visit_Compare(self, node): + with self.require_parens(_Precedence.CMP, node): + self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators) + self.traverse(node.left) + for o, e in zip(node.ops, node.comparators): + self.write(" " + self.cmpops[o.__class__.__name__] + " ") + self.traverse(e) + + boolops = {"And": "and", "Or": "or"} + boolop_precedence = {"and": _Precedence.AND, "or": _Precedence.OR} + + def visit_BoolOp(self, node): + operator = self.boolops[node.op.__class__.__name__] + operator_precedence = self.boolop_precedence[operator] + + def increasing_level_traverse(node): + nonlocal operator_precedence + operator_precedence = operator_precedence.next() + self.set_precedence(operator_precedence, node) + self.traverse(node) + + with self.require_parens(operator_precedence, node): + s = f" {operator} " + self.interleave(lambda: self.write(s), increasing_level_traverse, node.values) + + def visit_Attribute(self, node): + self.set_precedence(_Precedence.ATOM, 
node.value) + self.traverse(node.value) + # Special case: 3.__abs__() is a syntax error, so if node.value + # is an integer literal then we need to either parenthesize + # it or add an extra space to get 3 .__abs__(). + if isinstance(node.value, Constant) and isinstance(node.value.value, int): + self.write(" ") + self.write(".") + self.write(node.attr) + + def visit_Call(self, node): + self.set_precedence(_Precedence.ATOM, node.func) + self.traverse(node.func) + with self.delimit("(", ")"): + comma = False + for e in node.args: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + for e in node.keywords: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + + def visit_Subscript(self, node): + def is_non_empty_tuple(slice_value): + return ( + isinstance(slice_value, Tuple) + and slice_value.elts + ) + + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + with self.delimit("[", "]"): + if is_non_empty_tuple(node.slice): + # parentheses can be omitted if the tuple isn't empty + self.items_view(self.traverse, node.slice.elts) + else: + self.traverse(node.slice) + + def visit_Starred(self, node): + self.write("*") + self.set_precedence(_Precedence.EXPR, node.value) + self.traverse(node.value) + + def visit_Ellipsis(self, node): + self.write("...") + + def visit_Slice(self, node): + if node.lower: + self.traverse(node.lower) + self.write(":") + if node.upper: + self.traverse(node.upper) + if node.step: + self.write(":") + self.traverse(node.step) + + def visit_Match(self, node): + self.fill("match ", allow_semicolon=False) + self.traverse(node.subject) + with self.block(): + for case in node.cases: + self.traverse(case) + + def visit_arg(self, node): + self.write(node.arg) + if node.annotation: + self.write(": ") + self.traverse(node.annotation) + + def visit_arguments(self, node): + first = True + # normal arguments + all_args = node.posonlyargs + node.args + defaults = [None] * (len(all_args) - 
len(node.defaults)) + node.defaults + for index, elements in enumerate(zip(all_args, defaults), 1): + a, d = elements + if first: + first = False + else: + self.write(", ") + self.traverse(a) + if d: + self.write("=") + self.traverse(d) + if index == len(node.posonlyargs): + self.write(", /") + + # varargs, or bare '*' if no varargs but keyword-only arguments present + if node.vararg or node.kwonlyargs: + if first: + first = False + else: + self.write(", ") + self.write("*") + if node.vararg: + self.write(node.vararg.arg) + if node.vararg.annotation: + self.write(": ") + self.traverse(node.vararg.annotation) + + # keyword-only arguments + if node.kwonlyargs: + for a, d in zip(node.kwonlyargs, node.kw_defaults): + self.write(", ") + self.traverse(a) + if d: + self.write("=") + self.traverse(d) + + # kwargs + if node.kwarg: + if first: + first = False + else: + self.write(", ") + self.write("**" + node.kwarg.arg) + if node.kwarg.annotation: + self.write(": ") + self.traverse(node.kwarg.annotation) + + def visit_keyword(self, node): + if node.arg is None: + self.write("**") + else: + self.write(node.arg) + self.write("=") + self.traverse(node.value) + + def visit_Lambda(self, node): + with self.require_parens(_Precedence.TEST, node): + self.write("lambda") + with self.buffered() as buffer: + self.traverse(node.args) + if buffer: + self.write(" ", *buffer) + self.write(": ") + self.set_precedence(_Precedence.TEST, node.body) + self.traverse(node.body) + + def visit_alias(self, node): + self.write(node.name) + if node.asname: + self.write(" as " + node.asname) + + def visit_withitem(self, node): + self.traverse(node.context_expr) + if node.optional_vars: + self.write(" as ") + self.traverse(node.optional_vars) + + def visit_match_case(self, node): + self.fill("case ", allow_semicolon=False) + self.traverse(node.pattern) + if node.guard: + self.write(" if ") + self.traverse(node.guard) + with self.block(): + self.traverse(node.body) + + def visit_MatchValue(self, node): 
+ self.traverse(node.value) + + def visit_MatchSingleton(self, node): + self._write_constant(node.value) + + def visit_MatchSequence(self, node): + with self.delimit("[", "]"): + self.interleave( + lambda: self.write(", "), self.traverse, node.patterns + ) + + def visit_MatchStar(self, node): + name = node.name + if name is None: + name = "_" + self.write(f"*{name}") + + def visit_MatchMapping(self, node): + def write_key_pattern_pair(pair): + k, p = pair + self.traverse(k) + self.write(": ") + self.traverse(p) + + with self.delimit("{", "}"): + keys = node.keys + self.interleave( + lambda: self.write(", "), + write_key_pattern_pair, + zip(keys, node.patterns, strict=True), + ) + rest = node.rest + if rest is not None: + if keys: + self.write(", ") + self.write(f"**{rest}") + + def visit_MatchClass(self, node): + self.set_precedence(_Precedence.ATOM, node.cls) + self.traverse(node.cls) + with self.delimit("(", ")"): + patterns = node.patterns + self.interleave( + lambda: self.write(", "), self.traverse, patterns + ) + attrs = node.kwd_attrs + if attrs: + def write_attr_pattern(pair): + attr, pattern = pair + self.write(f"{attr}=") + self.traverse(pattern) + + if patterns: + self.write(", ") + self.interleave( + lambda: self.write(", "), + write_attr_pattern, + zip(attrs, node.kwd_patterns, strict=True), + ) + + def visit_MatchAs(self, node): + name = node.name + pattern = node.pattern + if name is None: + self.write("_") + elif pattern is None: + self.write(node.name) + else: + with self.require_parens(_Precedence.TEST, node): + self.set_precedence(_Precedence.BOR, node.pattern) + self.traverse(node.pattern) + self.write(f" as {node.name}") + + def visit_MatchOr(self, node): + with self.require_parens(_Precedence.BOR, node): + self.set_precedence(_Precedence.BOR.next(), *node.patterns) + self.interleave(lambda: self.write(" | "), self.traverse, node.patterns) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/_collections_abc.py 
b/src/_nfdos/rootfs/usr/lib/python3.13/_collections_abc.py new file mode 100644 index 0000000..60b4713 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/_collections_abc.py @@ -0,0 +1,1177 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +Unit tests are in test_collections. +""" + +############ Maintenance notes ######################################### +# +# ABCs are different from other standard library modules in that they +# specify compliance tests. In general, once an ABC has been published, +# new methods (either abstract or concrete) cannot be added. +# +# Though classes that inherit from an ABC would automatically receive a +# new mixin method, registered classes would become non-compliant and +# violate the contract promised by ``isinstance(someobj, SomeABC)``. +# +# Though irritating, the correct procedure for adding new abstract or +# mixin methods is to create a new ABC as a subclass of the previous +# ABC. For example, union(), intersection(), and difference() cannot +# be added to Set but could go into a new ABC that extends Set. +# +# Because they are so hard to change, new ABCs should have their APIs +# carefully thought through prior to publication. +# +# Since ABCMeta only checks for the presence of methods, it is possible +# to alter the signature of a method by adding optional arguments +# or changing parameters names. This is still a bit dubious but at +# least it won't cause isinstance() to return an incorrect result. +# +# +####################################################################### + +from abc import ABCMeta, abstractmethod +import sys + +GenericAlias = type(list[int]) +EllipsisType = type(...) 
+# Capture the concrete type of a plain Python function (the same object
+# exposed as types.FunctionType), then delete the throwaway helper so it
+# does not leak into the module namespace.
+def _f(): pass
+FunctionType = type(_f)
+del _f
+
+# Public names re-exported when this module is imported as ``collections.abc``.
+__all__ = ["Awaitable", "Coroutine",
+           "AsyncIterable", "AsyncIterator", "AsyncGenerator",
+           "Hashable", "Iterable", "Iterator", "Generator", "Reversible",
+           "Sized", "Container", "Callable", "Collection",
+           "Set", "MutableSet",
+           "Mapping", "MutableMapping",
+           "MappingView", "KeysView", "ItemsView", "ValuesView",
+           "Sequence", "MutableSequence",
+           "Buffer",
+           ]
+
+# This module has been renamed from collections.abc to _collections_abc to
+# speed up interpreter startup. Some of the types such as MutableMapping are
+# required early but collections module imports a lot of other modules.
+# See issue #19218
+__name__ = "collections.abc"
+
+# Private list of types that we want to register with the various ABCs
+# so that they will pass tests like:
+#       it = iter(somebytearray)
+#       assert isinstance(it, Iterable)
+# Note: in other implementations, these types might not be distinct
+# and they may have their own implementation specific types that
+# are not included on this list.
+bytes_iterator = type(iter(b''))
+bytearray_iterator = type(iter(bytearray()))
+#callable_iterator = ???
+dict_keyiterator = type(iter({}.keys())) +dict_valueiterator = type(iter({}.values())) +dict_itemiterator = type(iter({}.items())) +list_iterator = type(iter([])) +list_reverseiterator = type(iter(reversed([]))) +range_iterator = type(iter(range(0))) +longrange_iterator = type(iter(range(1 << 1000))) +set_iterator = type(iter(set())) +str_iterator = type(iter("")) +tuple_iterator = type(iter(())) +zip_iterator = type(iter(zip())) +## views ## +dict_keys = type({}.keys()) +dict_values = type({}.values()) +dict_items = type({}.items()) +## misc ## +mappingproxy = type(type.__dict__) +def _get_framelocalsproxy(): + return type(sys._getframe().f_locals) +framelocalsproxy = _get_framelocalsproxy() +del _get_framelocalsproxy +generator = type((lambda: (yield))()) +## coroutine ## +async def _coro(): pass +_coro = _coro() +coroutine = type(_coro) +_coro.close() # Prevent ResourceWarning +del _coro +## asynchronous generator ## +async def _ag(): yield +_ag = _ag() +async_generator = type(_ag) +del _ag + + +### ONE-TRICK PONIES ### + +def _check_methods(C, *methods): + mro = C.__mro__ + for method in methods: + for B in mro: + if method in B.__dict__: + if B.__dict__[method] is None: + return NotImplemented + break + else: + return NotImplemented + return True + +class Hashable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __hash__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Hashable: + return _check_methods(C, "__hash__") + return NotImplemented + + +class Awaitable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __await__(self): + yield + + @classmethod + def __subclasshook__(cls, C): + if cls is Awaitable: + return _check_methods(C, "__await__") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) + + +class Coroutine(Awaitable): + + __slots__ = () + + @abstractmethod + def send(self, value): + """Send a value into the coroutine. 
+ Return next yielded value or raise StopIteration. + """ + raise StopIteration + + @abstractmethod + def throw(self, typ, val=None, tb=None): + """Raise an exception in the coroutine. + Return next yielded value or raise StopIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + def close(self): + """Raise GeneratorExit inside coroutine. + """ + try: + self.throw(GeneratorExit) + except (GeneratorExit, StopIteration): + pass + else: + raise RuntimeError("coroutine ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is Coroutine: + return _check_methods(C, '__await__', 'send', 'throw', 'close') + return NotImplemented + + +Coroutine.register(coroutine) + + +class AsyncIterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __aiter__(self): + return AsyncIterator() + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterable: + return _check_methods(C, "__aiter__") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) + + +class AsyncIterator(AsyncIterable): + + __slots__ = () + + @abstractmethod + async def __anext__(self): + """Return the next item or raise StopAsyncIteration when exhausted.""" + raise StopAsyncIteration + + def __aiter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterator: + return _check_methods(C, "__anext__", "__aiter__") + return NotImplemented + + +class AsyncGenerator(AsyncIterator): + + __slots__ = () + + async def __anext__(self): + """Return the next item from the asynchronous generator. + When exhausted, raise StopAsyncIteration. + """ + return await self.asend(None) + + @abstractmethod + async def asend(self, value): + """Send a value into the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. 
+ """ + raise StopAsyncIteration + + @abstractmethod + async def athrow(self, typ, val=None, tb=None): + """Raise an exception in the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + async def aclose(self): + """Raise GeneratorExit inside coroutine. + """ + try: + await self.athrow(GeneratorExit) + except (GeneratorExit, StopAsyncIteration): + pass + else: + raise RuntimeError("asynchronous generator ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncGenerator: + return _check_methods(C, '__aiter__', '__anext__', + 'asend', 'athrow', 'aclose') + return NotImplemented + + +AsyncGenerator.register(async_generator) + + +class Iterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + return _check_methods(C, "__iter__") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) + + +class Iterator(Iterable): + + __slots__ = () + + @abstractmethod + def __next__(self): + 'Return the next item from the iterator. 
When exhausted, raise StopIteration' + raise StopIteration + + def __iter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterator: + return _check_methods(C, '__iter__', '__next__') + return NotImplemented + + +Iterator.register(bytes_iterator) +Iterator.register(bytearray_iterator) +#Iterator.register(callable_iterator) +Iterator.register(dict_keyiterator) +Iterator.register(dict_valueiterator) +Iterator.register(dict_itemiterator) +Iterator.register(list_iterator) +Iterator.register(list_reverseiterator) +Iterator.register(range_iterator) +Iterator.register(longrange_iterator) +Iterator.register(set_iterator) +Iterator.register(str_iterator) +Iterator.register(tuple_iterator) +Iterator.register(zip_iterator) + + +class Reversible(Iterable): + + __slots__ = () + + @abstractmethod + def __reversed__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Reversible: + return _check_methods(C, "__reversed__", "__iter__") + return NotImplemented + + +class Generator(Iterator): + + __slots__ = () + + def __next__(self): + """Return the next item from the generator. + When exhausted, raise StopIteration. + """ + return self.send(None) + + @abstractmethod + def send(self, value): + """Send a value into the generator. + Return next yielded value or raise StopIteration. + """ + raise StopIteration + + @abstractmethod + def throw(self, typ, val=None, tb=None): + """Raise an exception in the generator. + Return next yielded value or raise StopIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + def close(self): + """Raise GeneratorExit inside generator. 
+ """ + try: + self.throw(GeneratorExit) + except (GeneratorExit, StopIteration): + pass + else: + raise RuntimeError("generator ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is Generator: + return _check_methods(C, '__iter__', '__next__', + 'send', 'throw', 'close') + return NotImplemented + + +Generator.register(generator) + + +class Sized(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + return _check_methods(C, "__len__") + return NotImplemented + + +class Container(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + return _check_methods(C, "__contains__") + return NotImplemented + + __class_getitem__ = classmethod(GenericAlias) + + +class Collection(Sized, Iterable, Container): + + __slots__ = () + + @classmethod + def __subclasshook__(cls, C): + if cls is Collection: + return _check_methods(C, "__len__", "__iter__", "__contains__") + return NotImplemented + + +class Buffer(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __buffer__(self, flags: int, /) -> memoryview: + raise NotImplementedError + + @classmethod + def __subclasshook__(cls, C): + if cls is Buffer: + return _check_methods(C, "__buffer__") + return NotImplemented + + +class _CallableGenericAlias(GenericAlias): + """ Represent `Callable[argtypes, resulttype]`. + + This sets ``__args__`` to a tuple containing the flattened ``argtypes`` + followed by ``resulttype``. + + Example: ``Callable[[int, str], float]`` sets ``__args__`` to + ``(int, str, float)``. 
+ """ + + __slots__ = () + + def __new__(cls, origin, args): + if not (isinstance(args, tuple) and len(args) == 2): + raise TypeError( + "Callable must be used as Callable[[arg, ...], result].") + t_args, t_result = args + if isinstance(t_args, (tuple, list)): + args = (*t_args, t_result) + elif not _is_param_expr(t_args): + raise TypeError(f"Expected a list of types, an ellipsis, " + f"ParamSpec, or Concatenate. Got {t_args}") + return super().__new__(cls, origin, args) + + def __repr__(self): + if len(self.__args__) == 2 and _is_param_expr(self.__args__[0]): + return super().__repr__() + from annotationlib import type_repr + return (f'collections.abc.Callable' + f'[[{", ".join([type_repr(a) for a in self.__args__[:-1]])}], ' + f'{type_repr(self.__args__[-1])}]') + + def __reduce__(self): + args = self.__args__ + if not (len(args) == 2 and _is_param_expr(args[0])): + args = list(args[:-1]), args[-1] + return _CallableGenericAlias, (Callable, args) + + def __getitem__(self, item): + # Called during TypeVar substitution, returns the custom subclass + # rather than the default types.GenericAlias object. Most of the + # code is copied from typing's _GenericAlias and the builtin + # types.GenericAlias. 
+ if not isinstance(item, tuple): + item = (item,) + + new_args = super().__getitem__(item).__args__ + + # args[0] occurs due to things like Z[[int, str, bool]] from PEP 612 + if not isinstance(new_args[0], (tuple, list)): + t_result = new_args[-1] + t_args = new_args[:-1] + new_args = (t_args, t_result) + return _CallableGenericAlias(Callable, tuple(new_args)) + +def _is_param_expr(obj): + """Checks if obj matches either a list of types, ``...``, ``ParamSpec`` or + ``_ConcatenateGenericAlias`` from typing.py + """ + if obj is Ellipsis: + return True + if isinstance(obj, list): + return True + obj = type(obj) + names = ('ParamSpec', '_ConcatenateGenericAlias') + return obj.__module__ == 'typing' and any(obj.__name__ == name for name in names) + + +class Callable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __call__(self, *args, **kwds): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Callable: + return _check_methods(C, "__call__") + return NotImplemented + + __class_getitem__ = classmethod(_CallableGenericAlias) + + +### SETS ### + + +class Set(Collection): + """A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), redefine __le__ and __ge__, + then the other operations will automatically follow suit. 
+ """ + + __slots__ = () + + def __le__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for elem in self: + if elem not in other: + return False + return True + + def __lt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) < len(other) and self.__le__(other) + + def __gt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) > len(other) and self.__ge__(other) + + def __ge__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True + + def __eq__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) == len(other) and self.__le__(other) + + @classmethod + def _from_iterable(cls, it): + '''Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + ''' + return cls(it) + + def __and__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + return self._from_iterable(value for value in other if value in self) + + __rand__ = __and__ + + def isdisjoint(self, other): + 'Return True if two sets have a null intersection.' 
+ for value in other: + if value in self: + return False + return True + + def __or__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + chain = (e for s in (self, other) for e in s) + return self._from_iterable(chain) + + __ror__ = __or__ + + def __sub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in self + if value not in other) + + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + + def __xor__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return (self - other) | (other - self) + + __rxor__ = __xor__ + + def _hash(self): + """Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + """ + MAX = sys.maxsize + MASK = 2 * MAX + 1 + n = len(self) + h = 1927868237 * (n + 1) + h &= MASK + for x in self: + hx = hash(x) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + h ^= (h >> 11) ^ (h >> 25) + h = h * 69069 + 907133923 + h &= MASK + if h > MAX: + h -= MASK + 1 + if h == -1: + h = 590923713 + return h + + +Set.register(frozenset) + + +class MutableSet(Set): + """A mutable set is a finite, iterable container. 
+ + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ + + __slots__ = () + + @abstractmethod + def add(self, value): + """Add an element.""" + raise NotImplementedError + + @abstractmethod + def discard(self, value): + """Remove an element. Do not raise an exception if absent.""" + raise NotImplementedError + + def remove(self, value): + """Remove an element. If not a member, raise a KeyError.""" + if value not in self: + raise KeyError(value) + self.discard(value) + + def pop(self): + """Return the popped value. Raise KeyError if empty.""" + it = iter(self) + try: + value = next(it) + except StopIteration: + raise KeyError from None + self.discard(value) + return value + + def clear(self): + """This is slow (creates N new iterators!) but effective.""" + try: + while True: + self.pop() + except KeyError: + pass + + def __ior__(self, it): + for value in it: + self.add(value) + return self + + def __iand__(self, it): + for value in (self - it): + self.discard(value) + return self + + def __ixor__(self, it): + if it is self: + self.clear() + else: + if not isinstance(it, Set): + it = self._from_iterable(it) + for value in it: + if value in self: + self.discard(value) + else: + self.add(value) + return self + + def __isub__(self, it): + if it is self: + self.clear() + else: + for value in it: + self.discard(value) + return self + + +MutableSet.register(set) + + +### MAPPINGS ### + +class Mapping(Collection): + """A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. 
+ """ + + __slots__ = () + + # Tell ABCMeta.__new__ that this class should have TPFLAGS_MAPPING set. + __abc_tpflags__ = 1 << 6 # Py_TPFLAGS_MAPPING + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' + try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def keys(self): + "D.keys() -> a set-like object providing a view on D's keys" + return KeysView(self) + + def items(self): + "D.items() -> a set-like object providing a view on D's items" + return ItemsView(self) + + def values(self): + "D.values() -> an object providing a view on D's values" + return ValuesView(self) + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + __reversed__ = None + +Mapping.register(mappingproxy) +Mapping.register(framelocalsproxy) + + +class MappingView(Sized): + + __slots__ = '_mapping', + + def __init__(self, mapping): + self._mapping = mapping + + def __len__(self): + return len(self._mapping) + + def __repr__(self): + return '{0.__class__.__name__}({0._mapping!r})'.format(self) + + __class_getitem__ = classmethod(GenericAlias) + + +class KeysView(MappingView, Set): + + __slots__ = () + + @classmethod + def _from_iterable(cls, it): + return set(it) + + def __contains__(self, key): + return key in self._mapping + + def __iter__(self): + yield from self._mapping + + +KeysView.register(dict_keys) + + +class ItemsView(MappingView, Set): + + __slots__ = () + + @classmethod + def _from_iterable(cls, it): + return set(it) + + def __contains__(self, item): + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v is value or v == value + + def __iter__(self): + for key in self._mapping: + yield (key, self._mapping[key]) + 
+ +ItemsView.register(dict_items) + + +class ValuesView(MappingView, Collection): + + __slots__ = () + + def __contains__(self, value): + for key in self._mapping: + v = self._mapping[key] + if v is value or v == value: + return True + return False + + def __iter__(self): + for key in self._mapping: + yield self._mapping[key] + + +ValuesView.register(dict_values) + + +class MutableMapping(Mapping): + """A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. + """ + + __slots__ = () + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + ''' + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + '''D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + ''' + try: + key = next(iter(self)) + except StopIteration: + raise KeyError from None + value = self[key] + del self[key] + return key, value + + def clear(self): + 'D.clear() -> None. Remove all items from D.' + try: + while True: + self.popitem() + except KeyError: + pass + + def update(self, other=(), /, **kwds): + ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. 
+ If E present and has a .keys() method, does: for k in E.keys(): D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + ''' + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' + try: + return self[key] + except KeyError: + self[key] = default + return default + + +MutableMapping.register(dict) + + +### SEQUENCES ### + +class Sequence(Reversible, Collection): + """All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + """ + + __slots__ = () + + # Tell ABCMeta.__new__ that this class should have TPFLAGS_SEQUENCE set. + __abc_tpflags__ = 1 << 5 # Py_TPFLAGS_SEQUENCE + + @abstractmethod + def __getitem__(self, index): + raise IndexError + + def __iter__(self): + i = 0 + try: + while True: + v = self[i] + yield v + i += 1 + except IndexError: + return + + def __contains__(self, value): + for v in self: + if v is value or v == value: + return True + return False + + def __reversed__(self): + for i in reversed(range(len(self))): + yield self[i] + + def index(self, value, start=0, stop=None): + '''S.index(value, [start, [stop]]) -> integer -- return first index of value. + Raises ValueError if the value is not present. + + Supporting start and stop arguments is optional, but + recommended. 
+ ''' + if start is not None and start < 0: + start = max(len(self) + start, 0) + if stop is not None and stop < 0: + stop += len(self) + + i = start + while stop is None or i < stop: + try: + v = self[i] + except IndexError: + break + if v is value or v == value: + return i + i += 1 + raise ValueError + + def count(self, value): + 'S.count(value) -> integer -- return number of occurrences of value' + return sum(1 for v in self if v is value or v == value) + +Sequence.register(tuple) +Sequence.register(str) +Sequence.register(bytes) +Sequence.register(range) +Sequence.register(memoryview) + +class _DeprecateByteStringMeta(ABCMeta): + def __new__(cls, name, bases, namespace, **kwargs): + if name != "ByteString": + import warnings + + warnings._deprecated( + "collections.abc.ByteString", + remove=(3, 17), + ) + return super().__new__(cls, name, bases, namespace, **kwargs) + + def __instancecheck__(cls, instance): + import warnings + + warnings._deprecated( + "collections.abc.ByteString", + remove=(3, 17), + ) + return super().__instancecheck__(instance) + +class ByteString(Sequence, metaclass=_DeprecateByteStringMeta): + """Deprecated ABC serving as a common supertype of ``bytes`` and ``bytearray``. + + This ABC is scheduled for removal in Python 3.17. + Use ``isinstance(obj, collections.abc.Buffer)`` to test if ``obj`` + implements the buffer protocol at runtime. For use in type annotations, + either use ``Buffer`` or a union that explicitly specifies the types your + code supports (e.g., ``bytes | bytearray | memoryview``). + """ + + __slots__ = () + +ByteString.register(bytes) +ByteString.register(bytearray) + + +class MutableSequence(Sequence): + """All the operations on a read-write sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). 
+ """ + + __slots__ = () + + @abstractmethod + def __setitem__(self, index, value): + raise IndexError + + @abstractmethod + def __delitem__(self, index): + raise IndexError + + @abstractmethod + def insert(self, index, value): + 'S.insert(index, value) -- insert value before index' + raise IndexError + + def append(self, value): + 'S.append(value) -- append value to the end of the sequence' + self.insert(len(self), value) + + def clear(self): + 'S.clear() -> None -- remove all items from S' + try: + while True: + self.pop() + except IndexError: + pass + + def reverse(self): + 'S.reverse() -- reverse *IN PLACE*' + n = len(self) + for i in range(n//2): + self[i], self[n-i-1] = self[n-i-1], self[i] + + def extend(self, values): + 'S.extend(iterable) -- extend sequence by appending elements from the iterable' + if values is self: + values = list(values) + for v in values: + self.append(v) + + def pop(self, index=-1): + '''S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + ''' + v = self[index] + del self[index] + return v + + def remove(self, value): + '''S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. 
+ ''' + del self[self.index(value)] + + def __iadd__(self, values): + self.extend(values) + return self + + +MutableSequence.register(list) +MutableSequence.register(bytearray) + +_deprecated_ByteString = globals().pop("ByteString") + +def __getattr__(attr): + if attr == "ByteString": + import warnings + warnings._deprecated("collections.abc.ByteString", remove=(3, 17)) + globals()["ByteString"] = _deprecated_ByteString + return _deprecated_ByteString + raise AttributeError(f"module 'collections.abc' has no attribute {attr!r}") diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/_colorize.py b/src/_nfdos/rootfs/usr/lib/python3.13/_colorize.py new file mode 100644 index 0000000..d354862 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/_colorize.py @@ -0,0 +1,371 @@ +import io +import os +import sys + +from collections.abc import Callable, Iterator, Mapping +from dataclasses import dataclass, field, Field + +COLORIZE = True + + +# types +if False: + from typing import IO, Self, ClassVar + _theme: Theme + + +class ANSIColors: + RESET = "\x1b[0m" + + BLACK = "\x1b[30m" + BLUE = "\x1b[34m" + CYAN = "\x1b[36m" + GREEN = "\x1b[32m" + GREY = "\x1b[90m" + MAGENTA = "\x1b[35m" + RED = "\x1b[31m" + WHITE = "\x1b[37m" # more like LIGHT GRAY + YELLOW = "\x1b[33m" + + BOLD = "\x1b[1m" + BOLD_BLACK = "\x1b[1;30m" # DARK GRAY + BOLD_BLUE = "\x1b[1;34m" + BOLD_CYAN = "\x1b[1;36m" + BOLD_GREEN = "\x1b[1;32m" + BOLD_MAGENTA = "\x1b[1;35m" + BOLD_RED = "\x1b[1;31m" + BOLD_WHITE = "\x1b[1;37m" # actual WHITE + BOLD_YELLOW = "\x1b[1;33m" + + # intense = like bold but without being bold + INTENSE_BLACK = "\x1b[90m" + INTENSE_BLUE = "\x1b[94m" + INTENSE_CYAN = "\x1b[96m" + INTENSE_GREEN = "\x1b[92m" + INTENSE_MAGENTA = "\x1b[95m" + INTENSE_RED = "\x1b[91m" + INTENSE_WHITE = "\x1b[97m" + INTENSE_YELLOW = "\x1b[93m" + + BACKGROUND_BLACK = "\x1b[40m" + BACKGROUND_BLUE = "\x1b[44m" + BACKGROUND_CYAN = "\x1b[46m" + BACKGROUND_GREEN = "\x1b[42m" + BACKGROUND_MAGENTA = "\x1b[45m" + 
BACKGROUND_RED = "\x1b[41m" + BACKGROUND_WHITE = "\x1b[47m" + BACKGROUND_YELLOW = "\x1b[43m" + + INTENSE_BACKGROUND_BLACK = "\x1b[100m" + INTENSE_BACKGROUND_BLUE = "\x1b[104m" + INTENSE_BACKGROUND_CYAN = "\x1b[106m" + INTENSE_BACKGROUND_GREEN = "\x1b[102m" + INTENSE_BACKGROUND_MAGENTA = "\x1b[105m" + INTENSE_BACKGROUND_RED = "\x1b[101m" + INTENSE_BACKGROUND_WHITE = "\x1b[107m" + INTENSE_BACKGROUND_YELLOW = "\x1b[103m" + + +ColorCodes = set() +NoColors = ANSIColors() + +for attr, code in ANSIColors.__dict__.items(): + if not attr.startswith("__"): + ColorCodes.add(code) + setattr(NoColors, attr, "") + + +# +# Experimental theming support (see gh-133346) +# + +# - Create a theme by copying an existing `Theme` with one or more sections +# replaced, using `default_theme.copy_with()`; +# - create a theme section by copying an existing `ThemeSection` with one or +# more colors replaced, using for example `default_theme.syntax.copy_with()`; +# - create a theme from scratch by instantiating a `Theme` data class with +# the required sections (which are also dataclass instances). +# +# Then call `_colorize.set_theme(your_theme)` to set it. +# +# Put your theme configuration in $PYTHONSTARTUP for the interactive shell, +# or sitecustomize.py in your virtual environment or Python installation for +# other uses. Your applications can call `_colorize.set_theme()` too. +# +# Note that thanks to the dataclasses providing default values for all fields, +# creating a new theme or theme section from scratch is possible without +# specifying all keys. 
+# +# For example, here's a theme that makes punctuation and operators less prominent: +# +# try: +# from _colorize import set_theme, default_theme, Syntax, ANSIColors +# except ImportError: +# pass +# else: +# theme_with_dim_operators = default_theme.copy_with( +# syntax=Syntax(op=ANSIColors.INTENSE_BLACK), +# ) +# set_theme(theme_with_dim_operators) +# del set_theme, default_theme, Syntax, ANSIColors, theme_with_dim_operators +# +# Guarding the import ensures that your .pythonstartup file will still work in +# Python 3.13 and older. Deleting the variables ensures they don't remain in your +# interactive shell's global scope. + +class ThemeSection(Mapping[str, str]): + """A mixin/base class for theme sections. + + It enables dictionary access to a section, as well as implements convenience + methods. + """ + + # The two types below are just that: types to inform the type checker that the + # mixin will work in context of those fields existing + __dataclass_fields__: ClassVar[dict[str, Field[str]]] + _name_to_value: Callable[[str], str] + + def __post_init__(self) -> None: + name_to_value = {} + for color_name in self.__dataclass_fields__: + name_to_value[color_name] = getattr(self, color_name) + super().__setattr__('_name_to_value', name_to_value.__getitem__) + + def copy_with(self, **kwargs: str) -> Self: + color_state: dict[str, str] = {} + for color_name in self.__dataclass_fields__: + color_state[color_name] = getattr(self, color_name) + color_state.update(kwargs) + return type(self)(**color_state) + + @classmethod + def no_colors(cls) -> Self: + color_state: dict[str, str] = {} + for color_name in cls.__dataclass_fields__: + color_state[color_name] = "" + return cls(**color_state) + + def __getitem__(self, key: str) -> str: + return self._name_to_value(key) + + def __len__(self) -> int: + return len(self.__dataclass_fields__) + + def __iter__(self) -> Iterator[str]: + return iter(self.__dataclass_fields__) + + +@dataclass(frozen=True) +class 
Argparse(ThemeSection): + usage: str = ANSIColors.BOLD_BLUE + prog: str = ANSIColors.BOLD_MAGENTA + prog_extra: str = ANSIColors.MAGENTA + heading: str = ANSIColors.BOLD_BLUE + summary_long_option: str = ANSIColors.CYAN + summary_short_option: str = ANSIColors.GREEN + summary_label: str = ANSIColors.YELLOW + summary_action: str = ANSIColors.GREEN + long_option: str = ANSIColors.BOLD_CYAN + short_option: str = ANSIColors.BOLD_GREEN + label: str = ANSIColors.BOLD_YELLOW + action: str = ANSIColors.BOLD_GREEN + reset: str = ANSIColors.RESET + + +@dataclass(frozen=True, kw_only=True) +class Difflib(ThemeSection): + """A 'git diff'-like theme for `difflib.unified_diff`.""" + added: str = ANSIColors.GREEN + context: str = ANSIColors.RESET # context lines + header: str = ANSIColors.BOLD # eg "---" and "+++" lines + hunk: str = ANSIColors.CYAN # the "@@" lines + removed: str = ANSIColors.RED + reset: str = ANSIColors.RESET + + +@dataclass(frozen=True, kw_only=True) +class Syntax(ThemeSection): + prompt: str = ANSIColors.BOLD_MAGENTA + keyword: str = ANSIColors.BOLD_BLUE + keyword_constant: str = ANSIColors.BOLD_BLUE + builtin: str = ANSIColors.CYAN + comment: str = ANSIColors.RED + string: str = ANSIColors.GREEN + number: str = ANSIColors.YELLOW + op: str = ANSIColors.RESET + definition: str = ANSIColors.BOLD + soft_keyword: str = ANSIColors.BOLD_BLUE + reset: str = ANSIColors.RESET + + +@dataclass(frozen=True, kw_only=True) +class Traceback(ThemeSection): + type: str = ANSIColors.BOLD_MAGENTA + message: str = ANSIColors.MAGENTA + filename: str = ANSIColors.MAGENTA + line_no: str = ANSIColors.MAGENTA + frame: str = ANSIColors.MAGENTA + error_highlight: str = ANSIColors.BOLD_RED + error_range: str = ANSIColors.RED + reset: str = ANSIColors.RESET + + +@dataclass(frozen=True, kw_only=True) +class Unittest(ThemeSection): + passed: str = ANSIColors.GREEN + warn: str = ANSIColors.YELLOW + fail: str = ANSIColors.RED + fail_info: str = ANSIColors.BOLD_RED + reset: str = 
ANSIColors.RESET + + +@dataclass(frozen=True, kw_only=True) +class Theme: + """A suite of themes for all sections of Python. + + When adding a new one, remember to also modify `copy_with` and `no_colors` + below. + """ + argparse: Argparse = field(default_factory=Argparse) + difflib: Difflib = field(default_factory=Difflib) + syntax: Syntax = field(default_factory=Syntax) + traceback: Traceback = field(default_factory=Traceback) + unittest: Unittest = field(default_factory=Unittest) + + def copy_with( + self, + *, + argparse: Argparse | None = None, + difflib: Difflib | None = None, + syntax: Syntax | None = None, + traceback: Traceback | None = None, + unittest: Unittest | None = None, + ) -> Self: + """Return a new Theme based on this instance with some sections replaced. + + Themes are immutable to protect against accidental modifications that + could lead to invalid terminal states. + """ + return type(self)( + argparse=argparse or self.argparse, + difflib=difflib or self.difflib, + syntax=syntax or self.syntax, + traceback=traceback or self.traceback, + unittest=unittest or self.unittest, + ) + + @classmethod + def no_colors(cls) -> Self: + """Return a new Theme where colors in all sections are empty strings. + + This allows writing user code as if colors are always used. The color + fields will be ANSI color code strings when colorization is desired + and possible, and empty strings otherwise. 
+ """ + return cls( + argparse=Argparse.no_colors(), + difflib=Difflib.no_colors(), + syntax=Syntax.no_colors(), + traceback=Traceback.no_colors(), + unittest=Unittest.no_colors(), + ) + + +def get_colors( + colorize: bool = False, *, file: IO[str] | IO[bytes] | None = None +) -> ANSIColors: + if colorize or can_colorize(file=file): + return ANSIColors() + else: + return NoColors + + +def decolor(text: str) -> str: + """Remove ANSI color codes from a string.""" + for code in ColorCodes: + text = text.replace(code, "") + return text + + +def can_colorize(*, file: IO[str] | IO[bytes] | None = None) -> bool: + + def _safe_getenv(k: str, fallback: str | None = None) -> str | None: + """Exception-safe environment retrieval. See gh-128636.""" + try: + return os.environ.get(k, fallback) + except Exception: + return fallback + + if file is None: + file = sys.stdout + + if not sys.flags.ignore_environment: + if _safe_getenv("PYTHON_COLORS") == "0": + return False + if _safe_getenv("PYTHON_COLORS") == "1": + return True + if _safe_getenv("NO_COLOR"): + return False + if not COLORIZE: + return False + if _safe_getenv("FORCE_COLOR"): + return True + if _safe_getenv("TERM") == "dumb": + return False + + if not hasattr(file, "fileno"): + return False + + if sys.platform == "win32": + try: + import nt + + if not nt._supports_virtual_terminal(): + return False + except (ImportError, AttributeError): + return False + + try: + return os.isatty(file.fileno()) + except io.UnsupportedOperation: + return hasattr(file, "isatty") and file.isatty() + + +default_theme = Theme() +theme_no_color = default_theme.no_colors() + + +def get_theme( + *, + tty_file: IO[str] | IO[bytes] | None = None, + force_color: bool = False, + force_no_color: bool = False, +) -> Theme: + """Returns the currently set theme, potentially in a zero-color variant. + + In cases where colorizing is not possible (see `can_colorize`), the returned + theme contains all empty strings in all color definitions. 
+ See `Theme.no_colors()` for more information. + + It is recommended not to cache the result of this function for extended + periods of time because the user might influence theme selection by + the interactive shell, a debugger, or application-specific code. The + environment (including environment variable state and console configuration + on Windows) can also change in the course of the application life cycle. + """ + if force_color or (not force_no_color and + can_colorize(file=tty_file)): + return _theme + return theme_no_color + + +def set_theme(t: Theme) -> None: + global _theme + + if not isinstance(t, Theme): + raise ValueError(f"Expected Theme object, found {t}") + + _theme = t + + +set_theme(default_theme) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/_compat_pickle.py b/src/_nfdos/rootfs/usr/lib/python3.13/_compat_pickle.py new file mode 100644 index 0000000..a981326 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/_compat_pickle.py @@ -0,0 +1,248 @@ +# This module is used to map the old Python 2 names to the new names used in +# Python 3 for the pickle module. This needed to make pickle streams +# generated with Python 2 loadable by Python 3. + +# This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import +# lib2to3 and use the mapping defined there, because lib2to3 uses pickle. +# Thus, this could cause the module to be imported recursively. 
# Old Python 2 module name -> its Python 3 location.  Consulted when
# unpickling streams produced by Python 2 so the legacy module name can be
# rewritten before the import is attempted.
IMPORT_MAPPING = {
    # Core / builtin module renames.
    '__builtin__': 'builtins',
    'copy_reg': 'copyreg',
    'Queue': 'queue',
    'SocketServer': 'socketserver',
    'ConfigParser': 'configparser',
    'repr': 'reprlib',
    # The Tkinter family was folded into the tkinter package.
    'tkFileDialog': 'tkinter.filedialog',
    'tkSimpleDialog': 'tkinter.simpledialog',
    'tkColorChooser': 'tkinter.colorchooser',
    'tkCommonDialog': 'tkinter.commondialog',
    'Dialog': 'tkinter.dialog',
    'Tkdnd': 'tkinter.dnd',
    'tkFont': 'tkinter.font',
    'tkMessageBox': 'tkinter.messagebox',
    'ScrolledText': 'tkinter.scrolledtext',
    'Tkconstants': 'tkinter.constants',
    'ttk': 'tkinter.ttk',
    'Tkinter': 'tkinter',
    # Modules that gained a leading underscore (or lost one).
    'markupbase': '_markupbase',
    '_winreg': 'winreg',
    'thread': '_thread',
    'dummy_thread': '_dummy_thread',
    # The dbm family became the dbm package.
    'dbhash': 'dbm.bsd',
    'dumbdbm': 'dbm.dumb',
    'dbm': 'dbm.ndbm',
    'gdbm': 'dbm.gnu',
    # Networking / web modules regrouped into packages.
    'xmlrpclib': 'xmlrpc.client',
    'SimpleXMLRPCServer': 'xmlrpc.server',
    'httplib': 'http.client',
    'htmlentitydefs': 'html.entities',
    'HTMLParser': 'html.parser',
    'Cookie': 'http.cookies',
    'cookielib': 'http.cookiejar',
    'BaseHTTPServer': 'http.server',
    # Miscellaneous moves.
    'test.test_support': 'test.support',
    'commands': 'subprocess',
    'urlparse': 'urllib.parse',
    'robotparser': 'urllib.robotparser',
    'urllib2': 'urllib.request',
    'anydbm': 'dbm',
    '_abcoll': 'collections.abc',
}


# Rename rules that are simple enough to apply mechanically.  The harder
# cases (e.g. names scattered through the urllib and types modules) are
# deliberately left out.  These rules must run before import names are fixed.
# (old module, old name) -> (new module, new name).  Applied to individual
# global references found in Python 2 pickle streams.
NAME_MAPPING = {
    # Builtins that moved or were renamed.
    ('__builtin__', 'xrange'): ('builtins', 'range'),
    ('__builtin__', 'reduce'): ('functools', 'reduce'),
    ('__builtin__', 'intern'): ('sys', 'intern'),
    ('__builtin__', 'unichr'): ('builtins', 'chr'),
    ('__builtin__', 'unicode'): ('builtins', 'str'),
    ('__builtin__', 'long'): ('builtins', 'int'),
    # itertools helpers that became builtins (or were renamed in place).
    ('itertools', 'izip'): ('builtins', 'zip'),
    ('itertools', 'imap'): ('builtins', 'map'),
    ('itertools', 'ifilter'): ('builtins', 'filter'),
    ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
    ('itertools', 'izip_longest'): ('itertools', 'zip_longest'),
    # User* containers were folded into collections.
    ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'),
    ('UserList', 'UserList'): ('collections', 'UserList'),
    ('UserString', 'UserString'): ('collections', 'UserString'),
    # Assorted moves.
    ('whichdb', 'whichdb'): ('dbm', 'whichdb'),
    ('_socket', 'fromfd'): ('socket', 'fromfd'),
    ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'),
    ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'),
    ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'),
    # urllib was split across urllib.error / urllib.request / urllib.parse.
    ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'),
    ('urllib', 'getproxies'): ('urllib.request', 'getproxies'),
    ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'),
    ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'),
    ('urllib', 'quote'): ('urllib.parse', 'quote'),
    ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'),
    ('urllib', 'unquote'): ('urllib.parse', 'unquote'),
    ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'),
    ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'),
    ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'),
    ('urllib', 'urlopen'): ('urllib.request', 'urlopen'),
    ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'),
    ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'),
    ('urllib2', 'URLError'): ('urllib.error', 'URLError'),
}

# Exceptions that lived in Python 2's `exceptions` module; in Python 3 they
# are all builtins, so each gets an ('exceptions', name) -> ('builtins', name)
# rule below.
PYTHON2_EXCEPTIONS = (
    "ArithmeticError",
    "AssertionError",
    "AttributeError",
    "BaseException",
    "BufferError",
    "BytesWarning",
    "DeprecationWarning",
    "EOFError",
    "EnvironmentError",
    "Exception",
    "FloatingPointError",
    "FutureWarning",
    "GeneratorExit",
    "IOError",
    "ImportError",
    "ImportWarning",
    "IndentationError",
    "IndexError",
    "KeyError",
    "KeyboardInterrupt",
    "LookupError",
    "MemoryError",
    "NameError",
    "NotImplementedError",
    "OSError",
    "OverflowError",
    "PendingDeprecationWarning",
    "ReferenceError",
    "RuntimeError",
    "RuntimeWarning",
    # StandardError is gone in Python 3, so we map it to Exception
    "StopIteration",
    "SyntaxError",
    "SyntaxWarning",
    "SystemError",
    "SystemExit",
    "TabError",
    "TypeError",
    "UnboundLocalError",
    "UnicodeDecodeError",
    "UnicodeEncodeError",
    "UnicodeError",
    "UnicodeTranslateError",
    "UnicodeWarning",
    "UserWarning",
    "ValueError",
    "Warning",
    "ZeroDivisionError",
)

# WindowsError only exists on builds that define it; include it when present.
try:
    WindowsError
except NameError:
    pass
else:
    PYTHON2_EXCEPTIONS += ("WindowsError",)

for excname in PYTHON2_EXCEPTIONS:
    NAME_MAPPING[("exceptions", excname)] = ("builtins", excname)

# multiprocessing exceptions moved into multiprocessing.context.
MULTIPROCESSING_EXCEPTIONS = (
    'AuthenticationError',
    'BufferTooShort',
    'ProcessError',
    'TimeoutError',
)

for excname in MULTIPROCESSING_EXCEPTIONS:
    NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname)

# Inverted tables for the 3.x -> 2.x direction.  The asserts guarantee the
# forward mappings are still one-to-one at this point (before the non-mutual
# entries below are merged in).
REVERSE_IMPORT_MAPPING = {v: k for (k, v) in IMPORT_MAPPING.items()}
assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING)
REVERSE_NAME_MAPPING = {v: k for (k, v) in NAME_MAPPING.items()}
assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING)

# Non-mutual mappings.
+ +IMPORT_MAPPING.update({ + 'cPickle': 'pickle', + '_elementtree': 'xml.etree.ElementTree', + 'FileDialog': 'tkinter.filedialog', + 'SimpleDialog': 'tkinter.simpledialog', + 'DocXMLRPCServer': 'xmlrpc.server', + 'SimpleHTTPServer': 'http.server', + # For compatibility with broken pickles saved in old Python 3 versions + 'UserDict': 'collections', + 'UserList': 'collections', + 'UserString': 'collections', + 'whichdb': 'dbm', + 'StringIO': 'io', + 'cStringIO': 'io', +}) + +REVERSE_IMPORT_MAPPING.update({ + '_bz2': 'bz2', + '_dbm': 'dbm', + '_functools': 'functools', + '_gdbm': 'gdbm', + '_pickle': 'pickle', +}) + +NAME_MAPPING.update({ + ('__builtin__', 'basestring'): ('builtins', 'str'), + ('exceptions', 'StandardError'): ('builtins', 'Exception'), + ('UserDict', 'UserDict'): ('collections', 'UserDict'), + ('socket', '_socketobject'): ('socket', 'SocketType'), +}) + +REVERSE_NAME_MAPPING.update({ + ('_functools', 'reduce'): ('__builtin__', 'reduce'), + ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'), + ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'), + ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'), + ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'), + ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'), + ('xmlrpc.server', 'XMLRPCDocGenerator'): + ('DocXMLRPCServer', 'XMLRPCDocGenerator'), + ('xmlrpc.server', 'DocXMLRPCRequestHandler'): + ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'), + ('xmlrpc.server', 'DocXMLRPCServer'): + ('DocXMLRPCServer', 'DocXMLRPCServer'), + ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'): + ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'), + ('http.server', 'SimpleHTTPRequestHandler'): + ('SimpleHTTPServer', 'SimpleHTTPRequestHandler'), + ('_socket', 'socket'): ('socket', '_socketobject'), +}) + +PYTHON3_OSERROR_EXCEPTIONS = ( + 'BrokenPipeError', + 'ChildProcessError', + 'ConnectionAbortedError', + 
'ConnectionError', + 'ConnectionRefusedError', + 'ConnectionResetError', + 'FileExistsError', + 'FileNotFoundError', + 'InterruptedError', + 'IsADirectoryError', + 'NotADirectoryError', + 'PermissionError', + 'ProcessLookupError', + 'TimeoutError', +) + +for excname in PYTHON3_OSERROR_EXCEPTIONS: + REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError') + +PYTHON3_IMPORTERROR_EXCEPTIONS = ( + 'ModuleNotFoundError', +) + +for excname in PYTHON3_IMPORTERROR_EXCEPTIONS: + REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError') +del excname diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/_ios_support.py b/src/_nfdos/rootfs/usr/lib/python3.13/_ios_support.py new file mode 100644 index 0000000..20467a7 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/_ios_support.py @@ -0,0 +1,71 @@ +import sys +try: + from ctypes import cdll, c_void_p, c_char_p, util +except ImportError: + # ctypes is an optional module. If it's not present, we're limited in what + # we can tell about the system, but we don't want to prevent the module + # from working. + print("ctypes isn't available; iOS system calls will not be available", file=sys.stderr) + objc = None +else: + # ctypes is available. 
def get_platform_ios():
    """Return (system, release, model, is_simulator) for the iOS host.

    Returns None when the ObjC runtime could not be loaded at module
    import time (i.e. ``objc`` is None).
    """
    # The multiarch tag (e.g. "arm64-iphonesimulator") identifies the
    # simulator builds.
    simulator = sys.implementation._multiarch.endswith("simulator")

    # Without ctypes/ObjC we cannot query the device at all.
    if not objc:
        return None

    # The selectors used below all take no arguments and (initially)
    # return ObjC object pointers.
    objc.objc_msgSend.restype = c_void_p
    objc.objc_msgSend.argtypes = [c_void_p, c_void_p]

    def send(receiver, selector):
        # Equivalent of sending a zero-argument message:
        # [receiver selector]
        return objc.objc_msgSend(receiver, objc.sel_registerName(selector))

    # device = [UIDevice currentDevice]
    device = send(objc.objc_getClass(b"UIDevice"), b"currentDevice")

    # Grab the three NSString properties we need while restype is still a
    # raw object pointer.
    raw_version = send(device, b"systemVersion")
    raw_system = send(device, b"systemName")
    raw_model = send(device, b"model")

    # -UTF8String returns a const char*, so switch the return type before
    # the final round of calls and decode the C strings.
    objc.objc_msgSend.restype = c_char_p
    system = send(raw_system, b"UTF8String").decode()
    release = send(raw_version, b"UTF8String").decode()
    model = send(raw_model, b"UTF8String").decode()

    return system, release, model, simulator
+ def updatepos(self, i, j): + if i >= j: + return j + rawdata = self.rawdata + nlines = rawdata.count("\n", i, j) + if nlines: + self.lineno = self.lineno + nlines + pos = rawdata.rindex("\n", i, j) # Should not fail + self.offset = j-(pos+1) + else: + self.offset = self.offset + j-i + return j + + _decl_otherchars = '' + + # Internal -- parse declaration (for use by subclasses). + def parse_declaration(self, i): + # This is some sort of declaration; in "HTML as + # deployed," this should only be the document type + # declaration (""). + # ISO 8879:1986, however, has more complex + # declaration syntax for elements in , including: + # --comment-- + # [marked section] + # name in the following list: ENTITY, DOCTYPE, ELEMENT, + # ATTLIST, NOTATION, SHORTREF, USEMAP, + # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM + rawdata = self.rawdata + j = i + 2 + assert rawdata[i:j] == "": + # the empty comment + return j + 1 + if rawdata[j:j+1] in ("-", ""): + # Start of comment followed by buffer boundary, + # or just a buffer boundary. + return -1 + # A simple, practical version could look like: ((name|stringlit) S*) + '>' + n = len(rawdata) + if rawdata[j:j+2] == '--': #comment + # Locate --.*-- as the body of the comment + return self.parse_comment(i) + elif rawdata[j] == '[': #marked section + # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section + # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA + # Note that this is extended by Microsoft Office "Save as Web" function + # to include [if...] and [endif]. 
+ return self.parse_marked_section(i) + else: #all other declaration elements + decltype, j = self._scan_name(j, i) + if j < 0: + return j + if decltype == "doctype": + self._decl_otherchars = '' + while j < n: + c = rawdata[j] + if c == ">": + # end of declaration syntax + data = rawdata[i+2:j] + if decltype == "doctype": + self.handle_decl(data) + else: + # According to the HTML5 specs sections "8.2.4.44 Bogus + # comment state" and "8.2.4.45 Markup declaration open + # state", a comment token should be emitted. + # Calling unknown_decl provides more flexibility though. + self.unknown_decl(data) + return j + 1 + if c in "\"'": + m = _declstringlit_match(rawdata, j) + if not m: + return -1 # incomplete + j = m.end() + elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": + name, j = self._scan_name(j, i) + elif c in self._decl_otherchars: + j = j + 1 + elif c == "[": + # this could be handled in a separate doctype parser + if decltype == "doctype": + j = self._parse_doctype_subset(j + 1, i) + elif decltype in {"attlist", "linktype", "link", "element"}: + # must tolerate []'d groups in a content model in an element declaration + # also in data attribute specifications of attlist declaration + # also link type declaration subsets in linktype declarations + # also link attribute specification lists in link declarations + raise AssertionError("unsupported '[' char in %s declaration" % decltype) + else: + raise AssertionError("unexpected '[' char in declaration") + else: + raise AssertionError("unexpected %r char in declaration" % rawdata[j]) + if j < 0: + return j + return -1 # incomplete + + # Internal -- parse a marked section + # Override this to handle MS-word extension syntax content + def parse_marked_section(self, i, report=1): + rawdata= self.rawdata + assert rawdata[i:i+3] == ' ending + match= _markedsectionclose.search(rawdata, i+3) + elif sectName in {"if", "else", "endif"}: + # look for MS Office ]> ending + match= 
_msmarkedsectionclose.search(rawdata, i+3) + else: + raise AssertionError( + 'unknown status keyword %r in marked section' % rawdata[i+3:j] + ) + if not match: + return -1 + if report: + j = match.start(0) + self.unknown_decl(rawdata[i+3: j]) + return match.end(0) + + # Internal -- parse comment, return length or -1 if not terminated + def parse_comment(self, i, report=1): + rawdata = self.rawdata + if rawdata[i:i+4] != ' a, b, c + abc_to_rgb(a, b, c) --> r, g, b + +All inputs and outputs are triples of floats in the range [0.0...1.0] +(with the exception of I and Q, which covers a slightly larger range). +Inputs outside the valid range may cause exceptions or invalid outputs. + +Supported color systems: +RGB: Red, Green, Blue components +YIQ: Luminance, Chrominance (used by composite video signals) +HLS: Hue, Luminance, Saturation +HSV: Hue, Saturation, Value +""" + +# References: +# http://en.wikipedia.org/wiki/YIQ +# http://en.wikipedia.org/wiki/HLS_color_space +# http://en.wikipedia.org/wiki/HSV_color_space + +__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb", + "rgb_to_hsv","hsv_to_rgb"] + +# Some floating-point constants + +ONE_THIRD = 1.0/3.0 +ONE_SIXTH = 1.0/6.0 +TWO_THIRD = 2.0/3.0 + +# YIQ: used by composite video signals (linear combinations of RGB) +# Y: perceived grey level (0.0 == black, 1.0 == white) +# I, Q: color components +# +# There are a great many versions of the constants used in these formulae. +# The ones in this library uses constants from the FCC version of NTSC. 
def rgb_to_yiq(r, g, b):
    """Convert RGB to YIQ using the FCC NTSC luminance/chrominance matrix."""
    y = 0.30*r + 0.59*g + 0.11*b
    return (y, 0.74*(r-y) - 0.27*(b-y), 0.48*(r-y) + 0.41*(b-y))

def yiq_to_rgb(y, i, q):
    """Convert YIQ back to RGB, clamping each channel into [0.0, 1.0].

    The coefficients are the exact inverse of the rgb_to_yiq matrix:
        r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48)
        b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48)
        g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59
    """
    r = y + 0.9468822170900693*i + 0.6235565819861433*q
    g = y - 0.27478764629897834*i - 0.6356910791873801*q
    b = y - 1.1085450346420322*i + 1.7090069284064666*q

    def clamp(channel):
        # Explicit comparisons (rather than min/max) so that NaN input
        # propagates unchanged, exactly like the unclamped arithmetic.
        if channel < 0.0:
            return 0.0
        if channel > 1.0:
            return 1.0
        return channel

    return (clamp(r), clamp(g), clamp(b))


# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation

def rgb_to_hls(r, g, b):
    """Convert RGB to HLS (hue, luminance, saturation)."""
    hi = max(r, g, b)
    lo = min(r, g, b)
    total = hi + lo
    span = hi - lo
    l = total / 2.0
    if lo == hi:
        # Achromatic: hue and saturation are zero by convention.
        return 0.0, l, 0.0
    if l <= 0.5:
        s = span / total
    else:
        s = span / (2.0 - hi - lo)  # Not always 2.0-total: gh-106498.
    rc = (hi - r) / span
    gc = (hi - g) / span
    bc = (hi - b) / span
    if r == hi:
        h = bc - gc
    elif g == hi:
        h = 2.0 + rc - bc
    else:
        h = 4.0 + gc - rc
    return (h / 6.0) % 1.0, l, s

def hls_to_rgb(h, l, s):
    """Convert HLS back to RGB."""
    if s == 0.0:
        # Achromatic: all channels equal the luminance.
        return l, l, l
    if l <= 0.5:
        m2 = l * (1.0 + s)
    else:
        m2 = l + s - (l * s)
    m1 = 2.0 * l - m2
    return (_v(m1, m2, h + ONE_THIRD),
            _v(m1, m2, h),
            _v(m1, m2, h - ONE_THIRD))

def _v(m1, m2, hue):
    # Piecewise-linear interpolation between m1 and m2 around the hue wheel.
    hue = hue % 1.0
    if hue < ONE_SIXTH:
        return m1 + (m2 - m1) * hue * 6.0
    if hue < 0.5:
        return m2
    if hue < TWO_THIRD:
        return m1 + (m2 - m1) * (TWO_THIRD - hue) * 6.0
    return m1


# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness

def rgb_to_hsv(r, g, b):
    """Convert RGB to HSV (hue, saturation, value)."""
    hi = max(r, g, b)
    lo = min(r, g, b)
    span = hi - lo
    v = hi
    if lo == hi:
        # Achromatic.
        return 0.0, 0.0, v
    s = span / hi
    rc = (hi - r) / span
    gc = (hi - g) / span
    bc = (hi - b) / span
    if r == hi:
        h = bc - gc
    elif g == hi:
        h = 2.0 + rc - bc
    else:
        h = 4.0 + gc - rc
    return (h / 6.0) % 1.0, s, v

def hsv_to_rgb(h, s, v):
    """Convert HSV back to RGB."""
    if s == 0.0:
        return v, v, v
    sector = int(h * 6.0)  # XXX assume int() truncates!
    f = (h * 6.0) - sector
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    # One RGB ordering per 60-degree sector of the hue wheel.
    return (
        (v, t, p), (q, v, p), (p, v, t),
        (p, q, v), (t, p, v), (v, p, q),
    )[sector % 6]
+ +Without arguments, it compiles all modules on sys.path, without +recursing into subdirectories. (Even though it should do so for +packages -- for now, you'll have to deal with packages separately.) + +See module py_compile for details of the actual byte-compilation. +""" +import os +import sys +import importlib.util +import py_compile +import struct +import filecmp + +from functools import partial +from pathlib import Path + +__all__ = ["compile_dir","compile_file","compile_path"] + +def _walk_dir(dir, maxlevels, quiet=0): + if quiet < 2 and isinstance(dir, os.PathLike): + dir = os.fspath(dir) + if not quiet: + print('Listing {!r}...'.format(dir)) + try: + names = os.listdir(dir) + except OSError: + if quiet < 2: + print("Can't list {!r}".format(dir)) + names = [] + names.sort() + for name in names: + if name == '__pycache__': + continue + fullname = os.path.join(dir, name) + if not os.path.isdir(fullname): + yield fullname + elif (maxlevels > 0 and name != os.curdir and name != os.pardir and + os.path.isdir(fullname) and not os.path.islink(fullname)): + yield from _walk_dir(fullname, maxlevels=maxlevels - 1, + quiet=quiet) + +def compile_dir(dir, maxlevels=None, ddir=None, force=False, + rx=None, quiet=0, legacy=False, optimize=-1, workers=1, + invalidation_mode=None, *, stripdir=None, + prependdir=None, limit_sl_dest=None, hardlink_dupes=False): + """Byte-compile all modules in the given directory tree. + + Arguments (only dir is required): + + dir: the directory to byte-compile + maxlevels: maximum recursion level (default `sys.getrecursionlimit()`) + ddir: the directory that will be prepended to the path to the + file as it is compiled into each byte-code file. + force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: int or list of optimization levels or -1 for level of + the interpreter. 
Multiple levels leads to multiple compiled + files each with one optimization level. + workers: maximum number of parallel workers + invalidation_mode: how the up-to-dateness of the pyc will be checked + stripdir: part of path to left-strip from source file path + prependdir: path to prepend to beginning of original file path, applied + after stripdir + limit_sl_dest: ignore symlinks if they are pointing outside of + the defined path + hardlink_dupes: hardlink duplicated pyc files + """ + ProcessPoolExecutor = None + if ddir is not None and (stripdir is not None or prependdir is not None): + raise ValueError(("Destination dir (ddir) cannot be used " + "in combination with stripdir or prependdir")) + if ddir is not None: + stripdir = dir + prependdir = ddir + ddir = None + if workers < 0: + raise ValueError('workers must be greater or equal to 0') + if workers != 1: + # Check if this is a system where ProcessPoolExecutor can function. + from concurrent.futures.process import _check_system_limits + try: + _check_system_limits() + except NotImplementedError: + workers = 1 + else: + from concurrent.futures import ProcessPoolExecutor + if maxlevels is None: + maxlevels = sys.getrecursionlimit() + files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels) + success = True + if workers != 1 and ProcessPoolExecutor is not None: + import multiprocessing + if multiprocessing.get_start_method() == 'fork': + mp_context = multiprocessing.get_context('forkserver') + else: + mp_context = None + # If workers == 0, let ProcessPoolExecutor choose + workers = workers or None + with ProcessPoolExecutor(max_workers=workers, + mp_context=mp_context) as executor: + results = executor.map(partial(compile_file, + ddir=ddir, force=force, + rx=rx, quiet=quiet, + legacy=legacy, + optimize=optimize, + invalidation_mode=invalidation_mode, + stripdir=stripdir, + prependdir=prependdir, + limit_sl_dest=limit_sl_dest, + hardlink_dupes=hardlink_dupes), + files, + chunksize=4) + success = min(results, 
default=True) + else: + for file in files: + if not compile_file(file, ddir, force, rx, quiet, + legacy, optimize, invalidation_mode, + stripdir=stripdir, prependdir=prependdir, + limit_sl_dest=limit_sl_dest, + hardlink_dupes=hardlink_dupes): + success = False + return success + +def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, + legacy=False, optimize=-1, + invalidation_mode=None, *, stripdir=None, prependdir=None, + limit_sl_dest=None, hardlink_dupes=False): + """Byte-compile one file. + + Arguments (only fullname is required): + + fullname: the file to byte-compile + ddir: if given, the directory name compiled in to the + byte-code file. + force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: int or list of optimization levels or -1 for level of + the interpreter. Multiple levels leads to multiple compiled + files each with one optimization level. + invalidation_mode: how the up-to-dateness of the pyc will be checked + stripdir: part of path to left-strip from source file path + prependdir: path to prepend to beginning of original file path, applied + after stripdir + limit_sl_dest: ignore symlinks if they are pointing outside of + the defined path. 
+ hardlink_dupes: hardlink duplicated pyc files + """ + + if ddir is not None and (stripdir is not None or prependdir is not None): + raise ValueError(("Destination dir (ddir) cannot be used " + "in combination with stripdir or prependdir")) + + success = True + fullname = os.fspath(fullname) + stripdir = os.fspath(stripdir) if stripdir is not None else None + name = os.path.basename(fullname) + + dfile = None + + if ddir is not None: + dfile = os.path.join(ddir, name) + + if stripdir is not None: + fullname_parts = fullname.split(os.path.sep) + stripdir_parts = stripdir.split(os.path.sep) + + if stripdir_parts != fullname_parts[:len(stripdir_parts)]: + if quiet < 2: + print("The stripdir path {!r} is not a valid prefix for " + "source path {!r}; ignoring".format(stripdir, fullname)) + else: + dfile = os.path.join(*fullname_parts[len(stripdir_parts):]) + + if prependdir is not None: + if dfile is None: + dfile = os.path.join(prependdir, fullname) + else: + dfile = os.path.join(prependdir, dfile) + + if isinstance(optimize, int): + optimize = [optimize] + + # Use set() to remove duplicates. + # Use sorted() to create pyc files in a deterministic order. 
+ optimize = sorted(set(optimize)) + + if hardlink_dupes and len(optimize) < 2: + raise ValueError("Hardlinking of duplicated bytecode makes sense " + "only for more than one optimization level") + + if rx is not None: + mo = rx.search(fullname) + if mo: + return success + + if limit_sl_dest is not None and os.path.islink(fullname): + if Path(limit_sl_dest).resolve() not in Path(fullname).resolve().parents: + return success + + opt_cfiles = {} + + if os.path.isfile(fullname): + for opt_level in optimize: + if legacy: + opt_cfiles[opt_level] = fullname + 'c' + else: + if opt_level >= 0: + opt = opt_level if opt_level >= 1 else '' + cfile = (importlib.util.cache_from_source( + fullname, optimization=opt)) + opt_cfiles[opt_level] = cfile + else: + cfile = importlib.util.cache_from_source(fullname) + opt_cfiles[opt_level] = cfile + + head, tail = name[:-3], name[-3:] + if tail == '.py': + if not force: + try: + mtime = int(os.stat(fullname).st_mtime) + expect = struct.pack('<4sLL', importlib.util.MAGIC_NUMBER, + 0, mtime & 0xFFFF_FFFF) + for cfile in opt_cfiles.values(): + with open(cfile, 'rb') as chandle: + actual = chandle.read(12) + if expect != actual: + break + else: + return success + except OSError: + pass + if not quiet: + print('Compiling {!r}...'.format(fullname)) + try: + for index, opt_level in enumerate(optimize): + cfile = opt_cfiles[opt_level] + ok = py_compile.compile(fullname, cfile, dfile, True, + optimize=opt_level, + invalidation_mode=invalidation_mode) + if index > 0 and hardlink_dupes: + previous_cfile = opt_cfiles[optimize[index - 1]] + if filecmp.cmp(cfile, previous_cfile, shallow=False): + os.unlink(cfile) + os.link(previous_cfile, cfile) + except py_compile.PyCompileError as err: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + # escape non-printable characters in msg + encoding = sys.stdout.encoding or sys.getdefaultencoding() + msg = 
err.msg.encode(encoding, errors='backslashreplace').decode(encoding) + print(msg) + except (SyntaxError, UnicodeError, OSError) as e: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + print(e.__class__.__name__ + ':', e) + else: + if ok == 0: + success = False + return success + +def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0, + legacy=False, optimize=-1, + invalidation_mode=None): + """Byte-compile all module on sys.path. + + Arguments (all optional): + + skip_curdir: if true, skip current directory (default True) + maxlevels: max recursion level (default 0) + force: as for compile_dir() (default False) + quiet: as for compile_dir() (default 0) + legacy: as for compile_dir() (default False) + optimize: as for compile_dir() (default -1) + invalidation_mode: as for compiler_dir() + """ + success = True + for dir in sys.path: + if (not dir or dir == os.curdir) and skip_curdir: + if quiet < 2: + print('Skipping current directory') + else: + success = success and compile_dir( + dir, + maxlevels, + None, + force, + quiet=quiet, + legacy=legacy, + optimize=optimize, + invalidation_mode=invalidation_mode, + ) + return success + + +def main(): + """Script main program.""" + import argparse + + parser = argparse.ArgumentParser( + description='Utilities to support installing Python libraries.', + color=True, + ) + parser.add_argument('-l', action='store_const', const=0, + default=None, dest='maxlevels', + help="don't recurse into subdirectories") + parser.add_argument('-r', type=int, dest='recursion', + help=('control the maximum recursion level. 
' + 'if `-l` and `-r` options are specified, ' + 'then `-r` takes precedence.')) + parser.add_argument('-f', action='store_true', dest='force', + help='force rebuild even if timestamps are up to date') + parser.add_argument('-q', action='count', dest='quiet', default=0, + help='output only error messages; -qq will suppress ' + 'the error messages as well.') + parser.add_argument('-b', action='store_true', dest='legacy', + help='use legacy (pre-PEP3147) compiled file locations') + parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None, + help=('directory to prepend to file paths for use in ' + 'compile-time tracebacks and in runtime ' + 'tracebacks in cases where the source file is ' + 'unavailable')) + parser.add_argument('-s', metavar='STRIPDIR', dest='stripdir', + default=None, + help=('part of path to left-strip from path ' + 'to source file - for example buildroot. ' + '`-d` and `-s` options cannot be ' + 'specified together.')) + parser.add_argument('-p', metavar='PREPENDDIR', dest='prependdir', + default=None, + help=('path to add as prefix to path ' + 'to source file - for example / to make ' + 'it absolute when some part is removed ' + 'by `-s` option. 
' + '`-d` and `-p` options cannot be ' + 'specified together.')) + parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None, + help=('skip files matching the regular expression; ' + 'the regexp is searched for in the full path ' + 'of each file considered for compilation')) + parser.add_argument('-i', metavar='FILE', dest='flist', + help=('add all the files and directories listed in ' + 'FILE to the list considered for compilation; ' + 'if "-", names are read from stdin')) + parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*', + help=('zero or more file and directory names ' + 'to compile; if no arguments given, defaults ' + 'to the equivalent of -l sys.path')) + parser.add_argument('-j', '--workers', default=1, + type=int, help='Run compileall concurrently') + invalidation_modes = [mode.name.lower().replace('_', '-') + for mode in py_compile.PycInvalidationMode] + parser.add_argument('--invalidation-mode', + choices=sorted(invalidation_modes), + help=('set .pyc invalidation mode; defaults to ' + '"checked-hash" if the SOURCE_DATE_EPOCH ' + 'environment variable is set, and ' + '"timestamp" otherwise.')) + parser.add_argument('-o', action='append', type=int, dest='opt_levels', + help=('Optimization levels to run compilation with. 
' + 'Default is -1 which uses the optimization level ' + 'of the Python interpreter itself (see -O).')) + parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest', + help='Ignore symlinks pointing outsite of the DIR') + parser.add_argument('--hardlink-dupes', action='store_true', + dest='hardlink_dupes', + help='Hardlink duplicated pyc files') + + args = parser.parse_args() + compile_dests = args.compile_dest + + if args.rx: + import re + args.rx = re.compile(args.rx) + + if args.limit_sl_dest == "": + args.limit_sl_dest = None + + if args.recursion is not None: + maxlevels = args.recursion + else: + maxlevels = args.maxlevels + + if args.opt_levels is None: + args.opt_levels = [-1] + + if len(args.opt_levels) == 1 and args.hardlink_dupes: + parser.error(("Hardlinking of duplicated bytecode makes sense " + "only for more than one optimization level.")) + + if args.ddir is not None and ( + args.stripdir is not None or args.prependdir is not None + ): + parser.error("-d cannot be used in combination with -s or -p") + + # if flist is provided then load it + if args.flist: + try: + with (sys.stdin if args.flist=='-' else + open(args.flist, encoding="utf-8")) as f: + for line in f: + compile_dests.append(line.strip()) + except OSError: + if args.quiet < 2: + print("Error reading file list {}".format(args.flist)) + return False + + if args.invalidation_mode: + ivl_mode = args.invalidation_mode.replace('-', '_').upper() + invalidation_mode = py_compile.PycInvalidationMode[ivl_mode] + else: + invalidation_mode = None + + success = True + try: + if compile_dests: + for dest in compile_dests: + if os.path.isfile(dest): + if not compile_file(dest, args.ddir, args.force, args.rx, + args.quiet, args.legacy, + invalidation_mode=invalidation_mode, + stripdir=args.stripdir, + prependdir=args.prependdir, + optimize=args.opt_levels, + limit_sl_dest=args.limit_sl_dest, + hardlink_dupes=args.hardlink_dupes): + success = False + else: + if not compile_dir(dest, maxlevels, 
args.ddir, + args.force, args.rx, args.quiet, + args.legacy, workers=args.workers, + invalidation_mode=invalidation_mode, + stripdir=args.stripdir, + prependdir=args.prependdir, + optimize=args.opt_levels, + limit_sl_dest=args.limit_sl_dest, + hardlink_dupes=args.hardlink_dupes): + success = False + return success + else: + return compile_path(legacy=args.legacy, force=args.force, + quiet=args.quiet, + invalidation_mode=invalidation_mode) + except KeyboardInterrupt: + if args.quiet < 2: + print("\n[interrupted]") + return False + return True + + +if __name__ == '__main__': + exit_status = int(not main()) + sys.exit(exit_status) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/compression/__init__.py b/src/_nfdos/rootfs/usr/lib/python3.13/compression/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/compression/_common/__init__.py b/src/_nfdos/rootfs/usr/lib/python3.13/compression/_common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/compression/_common/_streams.py b/src/_nfdos/rootfs/usr/lib/python3.13/compression/_common/_streams.py new file mode 100644 index 0000000..9f367d4 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/compression/_common/_streams.py @@ -0,0 +1,162 @@ +"""Internal classes used by compression modules""" + +import io +import sys + +BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE # Compressed data read chunk size + + +class BaseStream(io.BufferedIOBase): + """Mode-checking helper functions.""" + + def _check_not_closed(self): + if self.closed: + raise ValueError("I/O operation on closed file") + + def _check_can_read(self): + if not self.readable(): + raise io.UnsupportedOperation("File not open for reading") + + def _check_can_write(self): + if not self.writable(): + raise io.UnsupportedOperation("File not open for writing") + + def _check_can_seek(self): + if not self.readable(): + raise io.UnsupportedOperation("Seeking is only 
supported " + "on files open for reading") + if not self.seekable(): + raise io.UnsupportedOperation("The underlying file object " + "does not support seeking") + + +class DecompressReader(io.RawIOBase): + """Adapts the decompressor API to a RawIOBase reader API""" + + def readable(self): + return True + + def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args): + self._fp = fp + self._eof = False + self._pos = 0 # Current offset in decompressed stream + + # Set to size of decompressed stream once it is known, for SEEK_END + self._size = -1 + + # Save the decompressor factory and arguments. + # If the file contains multiple compressed streams, each + # stream will need a separate decompressor object. A new decompressor + # object is also needed when implementing a backwards seek(). + self._decomp_factory = decomp_factory + self._decomp_args = decomp_args + self._decompressor = self._decomp_factory(**self._decomp_args) + + # Exception class to catch from decompressor signifying invalid + # trailing data to ignore + self._trailing_error = trailing_error + + def close(self): + self._decompressor = None + return super().close() + + def seekable(self): + return self._fp.seekable() + + def readinto(self, b): + with memoryview(b) as view, view.cast("B") as byte_view: + data = self.read(len(byte_view)) + byte_view[:len(data)] = data + return len(data) + + def read(self, size=-1): + if size < 0: + return self.readall() + + if not size or self._eof: + return b"" + data = None # Default if EOF is encountered + # Depending on the input data, our call to the decompressor may not + # return any data. In this case, try again after reading another block. + while True: + if self._decompressor.eof: + rawblock = (self._decompressor.unused_data or + self._fp.read(BUFFER_SIZE)) + if not rawblock: + break + # Continue to next stream. 
+ self._decompressor = self._decomp_factory( + **self._decomp_args) + try: + data = self._decompressor.decompress(rawblock, size) + except self._trailing_error: + # Trailing data isn't a valid compressed stream; ignore it. + break + else: + if self._decompressor.needs_input: + rawblock = self._fp.read(BUFFER_SIZE) + if not rawblock: + raise EOFError("Compressed file ended before the " + "end-of-stream marker was reached") + else: + rawblock = b"" + data = self._decompressor.decompress(rawblock, size) + if data: + break + if not data: + self._eof = True + self._size = self._pos + return b"" + self._pos += len(data) + return data + + def readall(self): + chunks = [] + # sys.maxsize means the max length of output buffer is unlimited, + # so that the whole input buffer can be decompressed within one + # .decompress() call. + while data := self.read(sys.maxsize): + chunks.append(data) + + return b"".join(chunks) + + # Rewind the file to the beginning of the data stream. + def _rewind(self): + self._fp.seek(0) + self._eof = False + self._pos = 0 + self._decompressor = self._decomp_factory(**self._decomp_args) + + def seek(self, offset, whence=io.SEEK_SET): + # Recalculate offset as an absolute file position. + if whence == io.SEEK_SET: + pass + elif whence == io.SEEK_CUR: + offset = self._pos + offset + elif whence == io.SEEK_END: + # Seeking relative to EOF - we need to know the file's size. + if self._size < 0: + while self.read(io.DEFAULT_BUFFER_SIZE): + pass + offset = self._size + offset + else: + raise ValueError("Invalid value for whence: {}".format(whence)) + + # Make it so that offset is the number of bytes to skip forward. + if offset < self._pos: + self._rewind() + else: + offset -= self._pos + + # Read and discard data until we reach the desired position. 
+ while offset > 0: + data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset)) + if not data: + break + offset -= len(data) + + return self._pos + + def tell(self): + """Return the current file position.""" + return self._pos diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/compression/bz2.py b/src/_nfdos/rootfs/usr/lib/python3.13/compression/bz2.py new file mode 100644 index 0000000..16815d6 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/compression/bz2.py @@ -0,0 +1,5 @@ +import bz2 +__doc__ = bz2.__doc__ +del bz2 + +from bz2 import * diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/compression/gzip.py b/src/_nfdos/rootfs/usr/lib/python3.13/compression/gzip.py new file mode 100644 index 0000000..552f48f --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/compression/gzip.py @@ -0,0 +1,5 @@ +import gzip +__doc__ = gzip.__doc__ +del gzip + +from gzip import * diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/compression/lzma.py b/src/_nfdos/rootfs/usr/lib/python3.13/compression/lzma.py new file mode 100644 index 0000000..b4bc7cc --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/compression/lzma.py @@ -0,0 +1,5 @@ +import lzma +__doc__ = lzma.__doc__ +del lzma + +from lzma import * diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/compression/zlib.py b/src/_nfdos/rootfs/usr/lib/python3.13/compression/zlib.py new file mode 100644 index 0000000..3aa7e2d --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/compression/zlib.py @@ -0,0 +1,5 @@ +import zlib +__doc__ = zlib.__doc__ +del zlib + +from zlib import * diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/compression/zstd/__init__.py b/src/_nfdos/rootfs/usr/lib/python3.13/compression/zstd/__init__.py new file mode 100644 index 0000000..84b2591 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/compression/zstd/__init__.py @@ -0,0 +1,242 @@ +"""Python bindings to the Zstandard (zstd) compression library (RFC-8878).""" + +__all__ = ( + # compression.zstd + 'COMPRESSION_LEVEL_DEFAULT', + 
'compress', + 'CompressionParameter', + 'decompress', + 'DecompressionParameter', + 'finalize_dict', + 'get_frame_info', + 'Strategy', + 'train_dict', + + # compression.zstd._zstdfile + 'open', + 'ZstdFile', + + # _zstd + 'get_frame_size', + 'zstd_version', + 'zstd_version_info', + 'ZstdCompressor', + 'ZstdDecompressor', + 'ZstdDict', + 'ZstdError', +) + +import _zstd +import enum +from _zstd import (ZstdCompressor, ZstdDecompressor, ZstdDict, ZstdError, + get_frame_size, zstd_version) +from compression.zstd._zstdfile import ZstdFile, open, _nbytes + +# zstd_version_number is (MAJOR * 100 * 100 + MINOR * 100 + RELEASE) +zstd_version_info = (*divmod(_zstd.zstd_version_number // 100, 100), + _zstd.zstd_version_number % 100) +"""Version number of the runtime zstd library as a tuple of integers.""" + +COMPRESSION_LEVEL_DEFAULT = _zstd.ZSTD_CLEVEL_DEFAULT +"""The default compression level for Zstandard, currently '3'.""" + + +class FrameInfo: + """Information about a Zstandard frame.""" + + __slots__ = 'decompressed_size', 'dictionary_id' + + def __init__(self, decompressed_size, dictionary_id): + super().__setattr__('decompressed_size', decompressed_size) + super().__setattr__('dictionary_id', dictionary_id) + + def __repr__(self): + return (f'FrameInfo(decompressed_size={self.decompressed_size}, ' + f'dictionary_id={self.dictionary_id})') + + def __setattr__(self, name, _): + raise AttributeError(f"can't set attribute {name!r}") + + +def get_frame_info(frame_buffer): + """Get Zstandard frame information from a frame header. + + *frame_buffer* is a bytes-like object. It should start from the beginning + of a frame, and needs to include at least the frame header (6 to 18 bytes). + + The returned FrameInfo object has two attributes. + 'decompressed_size' is the size in bytes of the data in the frame when + decompressed, or None when the decompressed size is unknown. + 'dictionary_id' is an int in the range (0, 2**32). 
The special value 0 + means that the dictionary ID was not recorded in the frame header, + the frame may or may not need a dictionary to be decoded, + and the ID of such a dictionary is not specified. + """ + return FrameInfo(*_zstd.get_frame_info(frame_buffer)) + + +def train_dict(samples, dict_size): + """Return a ZstdDict representing a trained Zstandard dictionary. + + *samples* is an iterable of samples, where a sample is a bytes-like + object representing a file. + + *dict_size* is the dictionary's maximum size, in bytes. + """ + if not isinstance(dict_size, int): + ds_cls = type(dict_size).__qualname__ + raise TypeError(f'dict_size must be an int object, not {ds_cls!r}.') + + samples = tuple(samples) + chunks = b''.join(samples) + chunk_sizes = tuple(_nbytes(sample) for sample in samples) + if not chunks: + raise ValueError("samples contained no data; can't train dictionary.") + dict_content = _zstd.train_dict(chunks, chunk_sizes, dict_size) + return ZstdDict(dict_content) + + +def finalize_dict(zstd_dict, /, samples, dict_size, level): + """Return a ZstdDict representing a finalized Zstandard dictionary. + + Given a custom content as a basis for dictionary, and a set of samples, + finalize *zstd_dict* by adding headers and statistics according to the + Zstandard dictionary format. + + You may compose an effective dictionary content by hand, which is used as + basis dictionary, and use some samples to finalize a dictionary. The basis + dictionary may be a "raw content" dictionary. See *is_raw* in ZstdDict. + + *samples* is an iterable of samples, where a sample is a bytes-like object + representing a file. + *dict_size* is the dictionary's maximum size, in bytes. + *level* is the expected compression level. The statistics for each + compression level differ, so tuning the dictionary to the compression level + can provide improvements. 
+ """ + + if not isinstance(zstd_dict, ZstdDict): + raise TypeError('zstd_dict argument should be a ZstdDict object.') + if not isinstance(dict_size, int): + raise TypeError('dict_size argument should be an int object.') + if not isinstance(level, int): + raise TypeError('level argument should be an int object.') + + samples = tuple(samples) + chunks = b''.join(samples) + chunk_sizes = tuple(_nbytes(sample) for sample in samples) + if not chunks: + raise ValueError("The samples are empty content, can't finalize the " + "dictionary.") + dict_content = _zstd.finalize_dict(zstd_dict.dict_content, chunks, + chunk_sizes, dict_size, level) + return ZstdDict(dict_content) + + +def compress(data, level=None, options=None, zstd_dict=None): + """Return Zstandard compressed *data* as bytes. + + *level* is an int specifying the compression level to use, defaulting to + COMPRESSION_LEVEL_DEFAULT ('3'). + *options* is a dict object that contains advanced compression + parameters. See CompressionParameter for more on options. + *zstd_dict* is a ZstdDict object, a pre-trained Zstandard dictionary. See + the function train_dict for how to train a ZstdDict on sample data. + + For incremental compression, use a ZstdCompressor instead. + """ + comp = ZstdCompressor(level=level, options=options, zstd_dict=zstd_dict) + return comp.compress(data, mode=ZstdCompressor.FLUSH_FRAME) + + +def decompress(data, zstd_dict=None, options=None): + """Decompress one or more frames of Zstandard compressed *data*. + + *zstd_dict* is a ZstdDict object, a pre-trained Zstandard dictionary. See + the function train_dict for how to train a ZstdDict on sample data. + *options* is a dict object that contains advanced compression + parameters. See DecompressionParameter for more on options. + + For incremental decompression, use a ZstdDecompressor instead. 
+ """ + results = [] + while True: + decomp = ZstdDecompressor(options=options, zstd_dict=zstd_dict) + results.append(decomp.decompress(data)) + if not decomp.eof: + raise ZstdError('Compressed data ended before the ' + 'end-of-stream marker was reached') + data = decomp.unused_data + if not data: + break + return b''.join(results) + + +class CompressionParameter(enum.IntEnum): + """Compression parameters.""" + + compression_level = _zstd.ZSTD_c_compressionLevel + window_log = _zstd.ZSTD_c_windowLog + hash_log = _zstd.ZSTD_c_hashLog + chain_log = _zstd.ZSTD_c_chainLog + search_log = _zstd.ZSTD_c_searchLog + min_match = _zstd.ZSTD_c_minMatch + target_length = _zstd.ZSTD_c_targetLength + strategy = _zstd.ZSTD_c_strategy + + enable_long_distance_matching = _zstd.ZSTD_c_enableLongDistanceMatching + ldm_hash_log = _zstd.ZSTD_c_ldmHashLog + ldm_min_match = _zstd.ZSTD_c_ldmMinMatch + ldm_bucket_size_log = _zstd.ZSTD_c_ldmBucketSizeLog + ldm_hash_rate_log = _zstd.ZSTD_c_ldmHashRateLog + + content_size_flag = _zstd.ZSTD_c_contentSizeFlag + checksum_flag = _zstd.ZSTD_c_checksumFlag + dict_id_flag = _zstd.ZSTD_c_dictIDFlag + + nb_workers = _zstd.ZSTD_c_nbWorkers + job_size = _zstd.ZSTD_c_jobSize + overlap_log = _zstd.ZSTD_c_overlapLog + + def bounds(self): + """Return the (lower, upper) int bounds of a compression parameter. + + Both the lower and upper bounds are inclusive. + """ + return _zstd.get_param_bounds(self.value, is_compress=True) + + +class DecompressionParameter(enum.IntEnum): + """Decompression parameters.""" + + window_log_max = _zstd.ZSTD_d_windowLogMax + + def bounds(self): + """Return the (lower, upper) int bounds of a decompression parameter. + + Both the lower and upper bounds are inclusive. + """ + return _zstd.get_param_bounds(self.value, is_compress=False) + + +class Strategy(enum.IntEnum): + """Compression strategies, listed from fastest to strongest. + + Note that new strategies might be added in the future. 
+ Only the order (from fast to strong) is guaranteed, + the numeric value might change. + """ + + fast = _zstd.ZSTD_fast + dfast = _zstd.ZSTD_dfast + greedy = _zstd.ZSTD_greedy + lazy = _zstd.ZSTD_lazy + lazy2 = _zstd.ZSTD_lazy2 + btlazy2 = _zstd.ZSTD_btlazy2 + btopt = _zstd.ZSTD_btopt + btultra = _zstd.ZSTD_btultra + btultra2 = _zstd.ZSTD_btultra2 + + +# Check validity of the CompressionParameter & DecompressionParameter types +_zstd.set_parameter_types(CompressionParameter, DecompressionParameter) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/compression/zstd/_zstdfile.py b/src/_nfdos/rootfs/usr/lib/python3.13/compression/zstd/_zstdfile.py new file mode 100644 index 0000000..d709f5e --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/compression/zstd/_zstdfile.py @@ -0,0 +1,345 @@ +import io +from os import PathLike +from _zstd import ZstdCompressor, ZstdDecompressor, ZSTD_DStreamOutSize +from compression._common import _streams + +__all__ = ('ZstdFile', 'open') + +_MODE_CLOSED = 0 +_MODE_READ = 1 +_MODE_WRITE = 2 + + +def _nbytes(dat, /): + if isinstance(dat, (bytes, bytearray)): + return len(dat) + with memoryview(dat) as mv: + return mv.nbytes + + +class ZstdFile(_streams.BaseStream): + """A file-like object providing transparent Zstandard (de)compression. + + A ZstdFile can act as a wrapper for an existing file object, or refer + directly to a named file on disk. + + ZstdFile provides a *binary* file interface. Data is read and returned as + bytes, and may only be written to objects that support the Buffer Protocol. + """ + + FLUSH_BLOCK = ZstdCompressor.FLUSH_BLOCK + FLUSH_FRAME = ZstdCompressor.FLUSH_FRAME + + def __init__(self, file, /, mode='r', *, + level=None, options=None, zstd_dict=None): + """Open a Zstandard compressed file in binary mode. + + *file* can be either an file-like object, or a file name to open. + + *mode* can be 'r' for reading (default), 'w' for (over)writing, 'x' for + creating exclusively, or 'a' for appending. 
These can equivalently be + given as 'rb', 'wb', 'xb' and 'ab' respectively. + + *level* is an optional int specifying the compression level to use, + or COMPRESSION_LEVEL_DEFAULT if not given. + + *options* is an optional dict for advanced compression parameters. + See CompressionParameter and DecompressionParameter for the possible + options. + + *zstd_dict* is an optional ZstdDict object, a pre-trained Zstandard + dictionary. See train_dict() to train ZstdDict on sample data. + """ + self._fp = None + self._close_fp = False + self._mode = _MODE_CLOSED + self._buffer = None + + if not isinstance(mode, str): + raise ValueError('mode must be a str') + if options is not None and not isinstance(options, dict): + raise TypeError('options must be a dict or None') + mode = mode.removesuffix('b') # handle rb, wb, xb, ab + if mode == 'r': + if level is not None: + raise TypeError('level is illegal in read mode') + self._mode = _MODE_READ + elif mode in {'w', 'a', 'x'}: + if level is not None and not isinstance(level, int): + raise TypeError('level must be int or None') + self._mode = _MODE_WRITE + self._compressor = ZstdCompressor(level=level, options=options, + zstd_dict=zstd_dict) + self._pos = 0 + else: + raise ValueError(f'Invalid mode: {mode!r}') + + if isinstance(file, (str, bytes, PathLike)): + self._fp = io.open(file, f'{mode}b') + self._close_fp = True + elif ((mode == 'r' and hasattr(file, 'read')) + or (mode != 'r' and hasattr(file, 'write'))): + self._fp = file + else: + raise TypeError('file must be a file-like object ' + 'or a str, bytes, or PathLike object') + + if self._mode == _MODE_READ: + raw = _streams.DecompressReader( + self._fp, + ZstdDecompressor, + zstd_dict=zstd_dict, + options=options, + ) + self._buffer = io.BufferedReader(raw) + + def close(self): + """Flush and close the file. + + May be called multiple times. Once the file has been closed, + any other operation on it will raise ValueError. 
+ """ + if self._fp is None: + return + try: + if self._mode == _MODE_READ: + if getattr(self, '_buffer', None): + self._buffer.close() + self._buffer = None + elif self._mode == _MODE_WRITE: + self.flush(self.FLUSH_FRAME) + self._compressor = None + finally: + self._mode = _MODE_CLOSED + try: + if self._close_fp: + self._fp.close() + finally: + self._fp = None + self._close_fp = False + + def write(self, data, /): + """Write a bytes-like object *data* to the file. + + Returns the number of uncompressed bytes written, which is + always the length of data in bytes. Note that due to buffering, + the file on disk may not reflect the data written until .flush() + or .close() is called. + """ + self._check_can_write() + + length = _nbytes(data) + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += length + return length + + def flush(self, mode=FLUSH_BLOCK): + """Flush remaining data to the underlying stream. + + The mode argument can be FLUSH_BLOCK or FLUSH_FRAME. Abuse of this + method will reduce compression ratio, use it only when necessary. + + If the program is interrupted afterwards, all data can be recovered. + To ensure saving to disk, also need to use os.fsync(fd). + + This method does nothing in reading mode. + """ + if self._mode == _MODE_READ: + return + self._check_not_closed() + if mode not in {self.FLUSH_BLOCK, self.FLUSH_FRAME}: + raise ValueError('Invalid mode argument, expected either ' + 'ZstdFile.FLUSH_FRAME or ' + 'ZstdFile.FLUSH_BLOCK') + if self._compressor.last_mode == mode: + return + # Flush zstd block/frame, and write. + data = self._compressor.flush(mode) + self._fp.write(data) + if hasattr(self._fp, 'flush'): + self._fp.flush() + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b'' if the file is already at EOF. 
+ """ + if size is None: + size = -1 + self._check_can_read() + return self._buffer.read(size) + + def read1(self, size=-1): + """Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b'' if the file is at EOF. + """ + self._check_can_read() + if size < 0: + # Note this should *not* be io.DEFAULT_BUFFER_SIZE. + # ZSTD_DStreamOutSize is the minimum amount to read guaranteeing + # a full block is read. + size = ZSTD_DStreamOutSize + return self._buffer.read1(size) + + def readinto(self, b): + """Read bytes into b. + + Returns the number of bytes read (0 for EOF). + """ + self._check_can_read() + return self._buffer.readinto(b) + + def readinto1(self, b): + """Read bytes into b, while trying to avoid making multiple reads + from the underlying stream. + + Returns the number of bytes read (0 for EOF). + """ + self._check_can_read() + return self._buffer.readinto1(b) + + def readline(self, size=-1): + """Read a line of uncompressed bytes from the file. + + The terminating newline (if present) is retained. If size is + non-negative, no more than size bytes will be read (in which + case the line may be incomplete). Returns b'' if already at EOF. + """ + self._check_can_read() + return self._buffer.readline(size) + + def seek(self, offset, whence=io.SEEK_SET): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Possible values for whence are: + + 0: start of stream (default): offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the arguments, + this operation may be extremely slow. 
+ """ + self._check_can_read() + + # BufferedReader.seek() checks seekable + return self._buffer.seek(offset, whence) + + def peek(self, size=-1): + """Return buffered data without advancing the file position. + + Always returns at least one byte of data, unless at EOF. + The exact number of bytes returned is unspecified. + """ + # Relies on the undocumented fact that BufferedReader.peek() always + # returns at least one byte (except at EOF) + self._check_can_read() + return self._buffer.peek(size) + + def __next__(self): + if ret := self._buffer.readline(): + return ret + raise StopIteration + + def tell(self): + """Return the current file position.""" + self._check_not_closed() + if self._mode == _MODE_READ: + return self._buffer.tell() + elif self._mode == _MODE_WRITE: + return self._pos + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + @property + def name(self): + self._check_not_closed() + return self._fp.name + + @property + def mode(self): + return 'wb' if self._mode == _MODE_WRITE else 'rb' + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._buffer.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode == _MODE_READ + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + +def open(file, /, mode='rb', *, level=None, options=None, zstd_dict=None, + encoding=None, errors=None, newline=None): + """Open a Zstandard compressed file in binary or text mode. + + file can be either a file name (given as a str, bytes, or PathLike object), + in which case the named file is opened, or it can be an existing file object + to read from or write to. 
+ + The mode parameter can be 'r', 'rb' (default), 'w', 'wb', 'x', 'xb', 'a', + 'ab' for binary mode, or 'rt', 'wt', 'xt', 'at' for text mode. + + The level, options, and zstd_dict parameters specify the settings the same + as ZstdFile. + + When using read mode (decompression), the options parameter is a dict + representing advanced decompression options. The level parameter is not + supported in this case. When using write mode (compression), only one of + level, an int representing the compression level, or options, a dict + representing advanced compression options, may be passed. In both modes, + zstd_dict is a ZstdDict instance containing a trained Zstandard dictionary. + + For binary mode, this function is equivalent to the ZstdFile constructor: + ZstdFile(filename, mode, ...). In this case, the encoding, errors and + newline parameters must not be provided. + + For text mode, an ZstdFile object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error handling + behavior, and line ending(s). 
+ """ + + text_mode = 't' in mode + mode = mode.replace('t', '') + + if text_mode: + if 'b' in mode: + raise ValueError(f'Invalid mode: {mode!r}') + else: + if encoding is not None: + raise ValueError('Argument "encoding" not supported in binary mode') + if errors is not None: + raise ValueError('Argument "errors" not supported in binary mode') + if newline is not None: + raise ValueError('Argument "newline" not supported in binary mode') + + binary_file = ZstdFile(file, mode, level=level, options=options, + zstd_dict=zstd_dict) + + if text_mode: + return io.TextIOWrapper(binary_file, encoding, errors, newline) + else: + return binary_file diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/__init__.py b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/__init__.py new file mode 100644 index 0000000..196d378 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/__init__.py @@ -0,0 +1 @@ +# This directory is a Python package. diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/__init__.py b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/__init__.py new file mode 100644 index 0000000..d6ac4b3 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. 
+ +"""Execute computations asynchronously using threads or processes.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures._base import (FIRST_COMPLETED, + FIRST_EXCEPTION, + ALL_COMPLETED, + CancelledError, + TimeoutError, + InvalidStateError, + BrokenExecutor, + Future, + Executor, + wait, + as_completed) + +__all__ = [ + 'FIRST_COMPLETED', + 'FIRST_EXCEPTION', + 'ALL_COMPLETED', + 'CancelledError', + 'TimeoutError', + 'InvalidStateError', + 'BrokenExecutor', + 'Future', + 'Executor', + 'wait', + 'as_completed', + 'ProcessPoolExecutor', + 'ThreadPoolExecutor', +] + + +try: + import _interpreters +except ImportError: + _interpreters = None + +if _interpreters: + __all__.append('InterpreterPoolExecutor') + + +def __dir__(): + return __all__ + ['__author__', '__doc__'] + + +def __getattr__(name): + global ProcessPoolExecutor, ThreadPoolExecutor, InterpreterPoolExecutor + + if name == 'ProcessPoolExecutor': + from .process import ProcessPoolExecutor + return ProcessPoolExecutor + + if name == 'ThreadPoolExecutor': + from .thread import ThreadPoolExecutor + return ThreadPoolExecutor + + if _interpreters and name == 'InterpreterPoolExecutor': + from .interpreter import InterpreterPoolExecutor + return InterpreterPoolExecutor + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/_base.py b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/_base.py new file mode 100644 index 0000000..f506ce6 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/_base.py @@ -0,0 +1,701 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. 
+ +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import collections +import logging +import threading +import time +import types +import weakref +from itertools import islice + +FIRST_COMPLETED = 'FIRST_COMPLETED' +FIRST_EXCEPTION = 'FIRST_EXCEPTION' +ALL_COMPLETED = 'ALL_COMPLETED' +_AS_COMPLETED = '_AS_COMPLETED' + +# Possible future states (for internal use by the futures package). +PENDING = 'PENDING' +RUNNING = 'RUNNING' +# The future was cancelled by the user... +CANCELLED = 'CANCELLED' +# ...and _Waiter.add_cancelled() was called by a worker. +CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' +FINISHED = 'FINISHED' + +_STATE_TO_DESCRIPTION_MAP = { + PENDING: "pending", + RUNNING: "running", + CANCELLED: "cancelled", + CANCELLED_AND_NOTIFIED: "cancelled", + FINISHED: "finished" +} + +# Logger for internal use by the futures package. +LOGGER = logging.getLogger("concurrent.futures") + +class Error(Exception): + """Base class for all future-related exceptions.""" + pass + +class CancelledError(Error): + """The Future was cancelled.""" + pass + +TimeoutError = TimeoutError # make local alias for the standard exception + +class InvalidStateError(Error): + """The operation is not allowed in this state.""" + pass + +class _Waiter(object): + """Provides the event that wait() and as_completed() block on.""" + def __init__(self): + self.event = threading.Event() + self.finished_futures = [] + + def add_result(self, future): + self.finished_futures.append(future) + + def add_exception(self, future): + self.finished_futures.append(future) + + def add_cancelled(self, future): + self.finished_futures.append(future) + +class _AsCompletedWaiter(_Waiter): + """Used by as_completed().""" + + def __init__(self): + super(_AsCompletedWaiter, self).__init__() + self.lock = threading.Lock() + + def add_result(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + with self.lock: + 
super(_AsCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _FirstCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_COMPLETED).""" + + def add_result(self, future): + super().add_result(future) + self.event.set() + + def add_exception(self, future): + super().add_exception(future) + self.event.set() + + def add_cancelled(self, future): + super().add_cancelled(future) + self.event.set() + +class _AllCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" + + def __init__(self, num_pending_calls, stop_on_exception): + self.num_pending_calls = num_pending_calls + self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() + super().__init__() + + def _decrement_pending_calls(self): + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() + + def add_result(self, future): + super().add_result(future) + self._decrement_pending_calls() + + def add_exception(self, future): + super().add_exception(future) + if self.stop_on_exception: + self.event.set() + else: + self._decrement_pending_calls() + + def add_cancelled(self, future): + super().add_cancelled(future) + self._decrement_pending_calls() + +class _AcquireFutures(object): + """A context manager that does an ordered acquire of Future conditions.""" + + def __init__(self, futures): + self.futures = sorted(futures, key=id) + + def __enter__(self): + for future in self.futures: + future._condition.acquire() + + def __exit__(self, *args): + for future in self.futures: + future._condition.release() + +def _create_and_install_waiters(fs, return_when): + if return_when == _AS_COMPLETED: + waiter = _AsCompletedWaiter() + elif return_when == FIRST_COMPLETED: + waiter = _FirstCompletedWaiter() + else: + pending_count = sum( + f._state not in [CANCELLED_AND_NOTIFIED, 
FINISHED] for f in fs) + + if return_when == FIRST_EXCEPTION: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) + elif return_when == ALL_COMPLETED: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) + else: + raise ValueError("Invalid return condition: %r" % return_when) + + for f in fs: + f._waiters.append(waiter) + + return waiter + + +def _yield_finished_futures(fs, waiter, ref_collect): + """ + Iterate on the list *fs*, yielding finished futures one by one in + reverse order. + Before yielding a future, *waiter* is removed from its waiters + and the future is removed from each set in the collection of sets + *ref_collect*. + + The aim of this function is to avoid keeping stale references after + the future is yielded and before the iterator resumes. + """ + while fs: + f = fs[-1] + for futures_set in ref_collect: + futures_set.remove(f) + with f._condition: + f._waiters.remove(waiter) + del f + # Careful not to keep a reference to the popped value + yield fs.pop() + + +def as_completed(fs, timeout=None): + """An iterator over the given futures that yields each as it completes. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + iterate over. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator that yields the given Futures as they complete (finished or + cancelled). If any given Futures are duplicated, they will be returned + once. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. 
+ """ + if timeout is not None: + end_time = timeout + time.monotonic() + + fs = set(fs) + total_futures = len(fs) + with _AcquireFutures(fs): + finished = set( + f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + pending = fs - finished + waiter = _create_and_install_waiters(fs, _AS_COMPLETED) + finished = list(finished) + try: + yield from _yield_finished_futures(finished, waiter, + ref_collect=(fs,)) + + while pending: + if timeout is None: + wait_timeout = None + else: + wait_timeout = end_time - time.monotonic() + if wait_timeout < 0: + raise TimeoutError( + '%d (of %d) futures unfinished' % ( + len(pending), total_futures)) + + waiter.event.wait(wait_timeout) + + with waiter.lock: + finished = waiter.finished_futures + waiter.finished_futures = [] + waiter.event.clear() + + # reverse to keep finishing order + finished.reverse() + yield from _yield_finished_futures(finished, waiter, + ref_collect=(fs, pending)) + + finally: + # Remove waiter from unfinished futures + for f in fs: + with f._condition: + f._waiters.remove(waiter) + +DoneAndNotDoneFutures = collections.namedtuple( + 'DoneAndNotDoneFutures', 'done not_done') +def wait(fs, timeout=None, return_when=ALL_COMPLETED): + """Wait for the futures in the given sequence to complete. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + wait upon. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + return_when: Indicates when this function should return. The options + are: + + FIRST_COMPLETED - Return when any future finishes or is + cancelled. + FIRST_EXCEPTION - Return when any future finishes by raising an + exception. If no future raises an exception + then it is equivalent to ALL_COMPLETED. + ALL_COMPLETED - Return when all futures finish or are cancelled. + + Returns: + A named 2-tuple of sets. 
The first set, named 'done', contains the + futures that completed (is finished or cancelled) before the wait + completed. The second set, named 'not_done', contains uncompleted + futures. Duplicate futures given to *fs* are removed and will be + returned only once. + """ + fs = set(fs) + with _AcquireFutures(fs): + done = {f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]} + not_done = fs - done + if (return_when == FIRST_COMPLETED) and done: + return DoneAndNotDoneFutures(done, not_done) + elif (return_when == FIRST_EXCEPTION) and done: + if any(f for f in done + if not f.cancelled() and f.exception() is not None): + return DoneAndNotDoneFutures(done, not_done) + + if len(done) == len(fs): + return DoneAndNotDoneFutures(done, not_done) + + waiter = _create_and_install_waiters(fs, return_when) + + waiter.event.wait(timeout) + for f in fs: + with f._condition: + f._waiters.remove(waiter) + + done.update(waiter.finished_futures) + return DoneAndNotDoneFutures(done, fs - done) + + +def _result_or_cancel(fut, timeout=None): + try: + try: + return fut.result(timeout) + finally: + fut.cancel() + finally: + # Break a reference cycle with the exception in self._exception + del fut + + +class Future(object): + """Represents the result of an asynchronous computation.""" + + def __init__(self): + """Initializes the future. 
Should not be called by clients.""" + self._condition = threading.Condition() + self._state = PENDING + self._result = None + self._exception = None + self._waiters = [] + self._done_callbacks = [] + + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + + def __repr__(self): + with self._condition: + if self._state == FINISHED: + if self._exception: + return '<%s at %#x state=%s raised %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._exception.__class__.__name__) + else: + return '<%s at %#x state=%s returned %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._result.__class__.__name__) + return '<%s at %#x state=%s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state]) + + def cancel(self): + """Cancel the future if possible. + + Returns True if the future was cancelled, False otherwise. A future + cannot be cancelled if it is running or has already completed. 
+ """ + with self._condition: + if self._state in [RUNNING, FINISHED]: + return False + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + return True + + self._state = CANCELLED + self._condition.notify_all() + + self._invoke_callbacks() + return True + + def cancelled(self): + """Return True if the future was cancelled.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] + + def running(self): + """Return True if the future is currently executing.""" + with self._condition: + return self._state == RUNNING + + def done(self): + """Return True if the future was cancelled or finished executing.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] + + def __get_result(self): + if self._exception is not None: + try: + raise self._exception + finally: + # Break a reference cycle with the exception in self._exception + self = None + else: + return self._result + + def add_done_callback(self, fn): + """Attaches a callable that will be called when the future finishes. + + Args: + fn: A callable that will be called with this future as its only + argument when the future completes or is cancelled. The callable + will always be called by a thread in the same process in which + it was added. If the future has already completed or been + cancelled then the callable will be called immediately. These + callables are called in the order that they were added. + """ + with self._condition: + if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: + self._done_callbacks.append(fn) + return + try: + fn(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + + def result(self, timeout=None): + """Return the result of the call that the future represents. + + Args: + timeout: The number of seconds to wait for the result if the future + isn't done. If None, then there is no limit on the wait time. 
+ + Returns: + The result of the call that the future represents. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + Exception: If the call raised then that exception will be raised. + """ + try: + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + else: + raise TimeoutError() + finally: + # Break a reference cycle with the exception in self._exception + self = None + + def exception(self, timeout=None): + """Return the exception raised by the call that the future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + """ + + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + else: + raise TimeoutError() + + # The following methods should only be used by Executors and in tests. + def set_running_or_notify_cancel(self): + """Mark the future as running or process any cancel notifications. + + Should only be used by Executor implementations and unit tests. 
+ + If the future has been cancelled (cancel() was called and returned + True) then any threads waiting on the future completing (though calls + to as_completed() or wait()) are notified and False is returned. + + If the future was not cancelled then it is put in the running state + (future calls to running() will return True) and True is returned. + + This method should be called by Executor implementations before + executing the work associated with this future. If this method returns + False then the work should not be executed. + + Returns: + False if the Future was cancelled, True otherwise. + + Raises: + RuntimeError: if this method was already called or if set_result() + or set_exception() was called. + """ + with self._condition: + if self._state == CANCELLED: + self._state = CANCELLED_AND_NOTIFIED + for waiter in self._waiters: + waiter.add_cancelled(self) + # self._condition.notify_all() is not necessary because + # self.cancel() triggers a notification. + return False + elif self._state == PENDING: + self._state = RUNNING + return True + else: + LOGGER.critical('Future %s in unexpected state: %s', + id(self), + self._state) + raise RuntimeError('Future in unexpected state') + + def set_result(self, result): + """Sets the return value of work associated with the future. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}: + raise InvalidStateError('{}: {!r}'.format(self._state, self)) + self._result = result + self._state = FINISHED + for waiter in self._waiters: + waiter.add_result(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception(self, exception): + """Sets the result of the future as being the given exception. + + Should only be used by Executor implementations and unit tests. 
+ """ + with self._condition: + if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}: + raise InvalidStateError('{}: {!r}'.format(self._state, self)) + self._exception = exception + self._state = FINISHED + for waiter in self._waiters: + waiter.add_exception(self) + self._condition.notify_all() + self._invoke_callbacks() + + def _get_snapshot(self): + """Get a snapshot of the future's current state. + + This method atomically retrieves the state in one lock acquisition, + which is significantly faster than multiple method calls. + + Returns: + Tuple of (done, cancelled, result, exception) + - done: True if the future is done (cancelled or finished) + - cancelled: True if the future was cancelled + - result: The result if available and not cancelled + - exception: The exception if available and not cancelled + """ + # Fast path: check if already finished without lock + if self._state == FINISHED: + return True, False, self._result, self._exception + + # Need lock for other states since they can change + with self._condition: + # We have to check the state again after acquiring the lock + # because it may have changed in the meantime. + if self._state == FINISHED: + return True, False, self._result, self._exception + if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED}: + return True, True, None, None + return False, False, None, None + + __class_getitem__ = classmethod(types.GenericAlias) + +class Executor(object): + """This is an abstract base class for concrete asynchronous executors.""" + + def submit(self, fn, /, *args, **kwargs): + """Submits a callable to be executed with the given arguments. + + Schedules the callable to be executed as fn(*args, **kwargs) and returns + a Future instance representing the execution of the callable. + + Returns: + A Future representing the given call. + """ + raise NotImplementedError() + + def map(self, fn, *iterables, timeout=None, chunksize=1, buffersize=None): + """Returns an iterator equivalent to map(fn, iter). 
+ + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: The size of the chunks the iterable will be broken into + before being passed to a child process. This argument is only + used by ProcessPoolExecutor; it is ignored by + ThreadPoolExecutor. + buffersize: The number of submitted tasks whose results have not + yet been yielded. If the buffer is full, iteration over the + iterables pauses until a result is yielded from the buffer. + If None, all input elements are eagerly collected, and a task is + submitted for each. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + if buffersize is not None and not isinstance(buffersize, int): + raise TypeError("buffersize must be an integer or None") + if buffersize is not None and buffersize < 1: + raise ValueError("buffersize must be None or > 0") + + if timeout is not None: + end_time = timeout + time.monotonic() + + zipped_iterables = zip(*iterables) + if buffersize: + fs = collections.deque( + self.submit(fn, *args) for args in islice(zipped_iterables, buffersize) + ) + else: + fs = [self.submit(fn, *args) for args in zipped_iterables] + + # Use a weak reference to ensure that the executor can be garbage + # collected independently of the result_iterator closure. + executor_weakref = weakref.ref(self) + + # Yield must be hidden in closure so that the futures are submitted + # before the first iterator value is required. 
+ def result_iterator(): + try: + # reverse to keep finishing order + fs.reverse() + while fs: + if ( + buffersize + and (executor := executor_weakref()) + and (args := next(zipped_iterables, None)) + ): + fs.appendleft(executor.submit(fn, *args)) + # Careful not to keep a reference to the popped future + if timeout is None: + yield _result_or_cancel(fs.pop()) + else: + yield _result_or_cancel(fs.pop(), end_time - time.monotonic()) + finally: + for future in fs: + future.cancel() + return result_iterator() + + def shutdown(self, wait=True, *, cancel_futures=False): + """Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + cancel_futures: If True then shutdown will cancel all pending + futures. Futures that are completed or running will not be + cancelled. + """ + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown(wait=True) + return False + + +class BrokenExecutor(RuntimeError): + """ + Raised when a executor has become non-functional after a severe failure. + """ diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/interpreter.py b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/interpreter.py new file mode 100644 index 0000000..53c6e75 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/interpreter.py @@ -0,0 +1,124 @@ +"""Implements InterpreterPoolExecutor.""" + +from concurrent import interpreters +import sys +import textwrap +from . 
import thread as _thread +import traceback + + +def do_call(results, func, args, kwargs): + try: + return func(*args, **kwargs) + except BaseException as exc: + # Send the captured exception out on the results queue, + # but still leave it unhandled for the interpreter to handle. + try: + results.put(exc) + except interpreters.NotShareableError: + # The exception is not shareable. + print('exception is not shareable:', file=sys.stderr) + traceback.print_exception(exc) + results.put(None) + raise # re-raise + + +class WorkerContext(_thread.WorkerContext): + + @classmethod + def prepare(cls, initializer, initargs): + def resolve_task(fn, args, kwargs): + if isinstance(fn, str): + # XXX Circle back to this later. + raise TypeError('scripts not supported') + else: + task = (fn, args, kwargs) + return task + + if initializer is not None: + try: + initdata = resolve_task(initializer, initargs, {}) + except ValueError: + if isinstance(initializer, str) and initargs: + raise ValueError(f'an initializer script does not take args, got {initargs!r}') + raise # re-raise + else: + initdata = None + def create_context(): + return cls(initdata) + return create_context, resolve_task + + def __init__(self, initdata): + self.initdata = initdata + self.interp = None + self.results = None + + def __del__(self): + if self.interp is not None: + self.finalize() + + def initialize(self): + assert self.interp is None, self.interp + self.interp = interpreters.create() + try: + maxsize = 0 + self.results = interpreters.create_queue(maxsize) + + if self.initdata: + self.run(self.initdata) + except BaseException: + self.finalize() + raise # re-raise + + def finalize(self): + interp = self.interp + results = self.results + self.results = None + self.interp = None + if results is not None: + del results + if interp is not None: + interp.close() + + def run(self, task): + try: + return self.interp.call(do_call, self.results, *task) + except interpreters.ExecutionFailed as wrapper: + # Wait for 
the exception data to show up. + exc = self.results.get() + if exc is None: + # The exception must have been not shareable. + raise # re-raise + raise exc from wrapper + + +class BrokenInterpreterPool(_thread.BrokenThreadPool): + """ + Raised when a worker thread in an InterpreterPoolExecutor failed initializing. + """ + + +class InterpreterPoolExecutor(_thread.ThreadPoolExecutor): + + BROKEN = BrokenInterpreterPool + + @classmethod + def prepare_context(cls, initializer, initargs): + return WorkerContext.prepare(initializer, initargs) + + def __init__(self, max_workers=None, thread_name_prefix='', + initializer=None, initargs=()): + """Initializes a new InterpreterPoolExecutor instance. + + Args: + max_workers: The maximum number of interpreters that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + initializer: A callable or script used to initialize + each worker interpreter. + initargs: A tuple of arguments to pass to the initializer. + """ + thread_name_prefix = (thread_name_prefix or + (f"InterpreterPoolExecutor-{self._counter()}")) + super().__init__(max_workers, thread_name_prefix, + initializer, initargs) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/process.py b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/process.py new file mode 100644 index 0000000..a14650b --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/process.py @@ -0,0 +1,939 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. + +The following diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | | | | Call Q | | Process | +| | +----------+ | | +-----------+ | Pool | +| | | ... | | | | ... 
| +---------+ +| | | 6 | => | | => | 5, call() | => | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | | | | 4, result | | | +| | | ... | | | | 3, except | | | ++----------+ +------------+ +--------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). 
+- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Result Q" +""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import os +from concurrent.futures import _base +import queue +import multiprocessing as mp +# This import is required to load the multiprocessing.connection submodule +# so that it can be accessed later as `mp.connection` +import multiprocessing.connection +from multiprocessing.queues import Queue +import threading +import weakref +from functools import partial +import itertools +import sys +from traceback import format_exception + + +_threads_wakeups = weakref.WeakKeyDictionary() +_global_shutdown = False + + +class _ThreadWakeup: + def __init__(self): + self._closed = False + self._lock = threading.Lock() + self._reader, self._writer = mp.Pipe(duplex=False) + + def close(self): + # Please note that we do not take the self._lock when + # calling clear() (to avoid deadlocking) so this method can + # only be called safely from the same thread as all calls to + # clear() even if you hold the lock. Otherwise we + # might try to read from the closed pipe. + with self._lock: + if not self._closed: + self._closed = True + self._writer.close() + self._reader.close() + + def wakeup(self): + with self._lock: + if not self._closed: + self._writer.send_bytes(b"") + + def clear(self): + if self._closed: + raise RuntimeError('operation on closed _ThreadWakeup') + while self._reader.poll(): + self._reader.recv_bytes() + + +def _python_exit(): + global _global_shutdown + _global_shutdown = True + items = list(_threads_wakeups.items()) + for _, thread_wakeup in items: + # call not protected by ProcessPoolExecutor._shutdown_lock + thread_wakeup.wakeup() + for t, _ in items: + t.join() + +# Register for `_python_exit()` to be called just before joining all +# non-daemon threads. 
This is used instead of `atexit.register()` for +# compatibility with subinterpreters, which no longer support daemon threads. +# See bpo-39812 for context. +threading._register_atexit(_python_exit) + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). +EXTRA_QUEUED_CALLS = 1 + + +# On Windows, WaitForMultipleObjects is used to wait for processes to finish. +# It can wait on, at most, 63 objects. There is an overhead of two objects: +# - the result queue reader +# - the thread wakeup reader +_MAX_WINDOWS_WORKERS = 63 - 2 + +# Hack to embed stringification of remote traceback in local traceback + +class _RemoteTraceback(Exception): + def __init__(self, tb): + self.tb = tb + def __str__(self): + return self.tb + +class _ExceptionWithTraceback: + def __init__(self, exc, tb): + tb = ''.join(format_exception(type(exc), exc, tb)) + self.exc = exc + # Traceback object needs to be garbage-collected as its frames + # contain references to all the objects in the exception scope + self.exc.__traceback__ = None + self.tb = '\n"""\n%s"""' % tb + def __reduce__(self): + return _rebuild_exc, (self.exc, self.tb) + +def _rebuild_exc(exc, tb): + exc.__cause__ = _RemoteTraceback(tb) + return exc + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + +class _ResultItem(object): + def __init__(self, work_id, exception=None, result=None, exit_pid=None): + self.work_id = work_id + self.exception = exception + self.result = result + self.exit_pid = exit_pid + +class _CallItem(object): + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + + +class _SafeQueue(Queue): + """Safe 
Queue set exception to the future object linked to a job""" + def __init__(self, max_size=0, *, ctx, pending_work_items, thread_wakeup): + self.pending_work_items = pending_work_items + self.thread_wakeup = thread_wakeup + super().__init__(max_size, ctx=ctx) + + def _on_queue_feeder_error(self, e, obj): + if isinstance(obj, _CallItem): + tb = format_exception(type(e), e, e.__traceback__) + e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb))) + work_item = self.pending_work_items.pop(obj.work_id, None) + self.thread_wakeup.wakeup() + # work_item can be None if another process terminated. In this + # case, the executor_manager_thread fails all work_items + # with BrokenProcessPool + if work_item is not None: + work_item.future.set_exception(e) + else: + super()._on_queue_feeder_error(e, obj) + + +def _process_chunk(fn, chunk): + """ Processes a chunk of an iterable passed to map. + + Runs the function passed to map() on a chunk of the + iterable passed to map. + + This function is run in a separate process. + + """ + return [fn(*args) for args in chunk] + + +def _sendback_result(result_queue, work_id, result=None, exception=None, + exit_pid=None): + """Safely send back the given result or exception""" + try: + result_queue.put(_ResultItem(work_id, result=result, + exception=exception, exit_pid=exit_pid)) + except BaseException as e: + exc = _ExceptionWithTraceback(e, e.__traceback__) + result_queue.put(_ResultItem(work_id, exception=exc, + exit_pid=exit_pid)) + + +def _process_worker(call_queue, result_queue, initializer, initargs, max_tasks=None): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A ctx.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A ctx.Queue of _ResultItems that will written + to by the worker. 
+ initializer: A callable initializer, or None + initargs: A tuple of args for the initializer + """ + if initializer is not None: + try: + initializer(*initargs) + except BaseException: + _base.LOGGER.critical('Exception in initializer:', exc_info=True) + # The parent will notice that the process stopped and + # mark the pool broken + return + num_tasks = 0 + exit_pid = None + while True: + call_item = call_queue.get(block=True) + if call_item is None: + # Wake up queue management thread + result_queue.put(os.getpid()) + return + + if max_tasks is not None: + num_tasks += 1 + if num_tasks >= max_tasks: + exit_pid = os.getpid() + + try: + r = call_item.fn(*call_item.args, **call_item.kwargs) + except BaseException as e: + exc = _ExceptionWithTraceback(e, e.__traceback__) + _sendback_result(result_queue, call_item.work_id, exception=exc, + exit_pid=exit_pid) + else: + _sendback_result(result_queue, call_item.work_id, result=r, + exit_pid=exit_pid) + del r + + # Liberate the resource as soon as possible, to avoid holding onto + # open files or shared memory that is not needed anymore + del call_item + + if exit_pid is not None: + return + + +class _ExecutorManagerThread(threading.Thread): + """Manages the communication between this process and the worker processes. + + The manager is run in a local thread. + + Args: + executor: A reference to the ProcessPoolExecutor that owns + this thread. A weakref will be own by the manager as well as + references to internal objects used to introspect the state of + the executor. + """ + + def __init__(self, executor): + # Store references to necessary internals of the executor. + + # A _ThreadWakeup to allow waking up the queue_manager_thread from the + # main Thread and avoid deadlocks caused by permanently locked queues. + self.thread_wakeup = executor._executor_manager_thread_wakeup + self.shutdown_lock = executor._shutdown_lock + + # A weakref.ref to the ProcessPoolExecutor that owns this thread. 
Used + # to determine if the ProcessPoolExecutor has been garbage collected + # and that the manager can exit. + # When the executor gets garbage collected, the weakref callback + # will wake up the queue management thread so that it can terminate + # if there is no pending work item. + def weakref_cb(_, + thread_wakeup=self.thread_wakeup, + mp_util_debug=mp.util.debug): + mp_util_debug('Executor collected: triggering callback for' + ' QueueManager wakeup') + thread_wakeup.wakeup() + + self.executor_reference = weakref.ref(executor, weakref_cb) + + # A list of the ctx.Process instances used as workers. + self.processes = executor._processes + + # A ctx.Queue that will be filled with _CallItems derived from + # _WorkItems for processing by the process workers. + self.call_queue = executor._call_queue + + # A ctx.SimpleQueue of _ResultItems generated by the process workers. + self.result_queue = executor._result_queue + + # A queue.Queue of work ids e.g. Queue([5, 6, ...]). + self.work_ids_queue = executor._work_ids + + # Maximum number of tasks a worker process can execute before + # exiting safely + self.max_tasks_per_child = executor._max_tasks_per_child + + # A dict mapping work ids to _WorkItems e.g. + # {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + self.pending_work_items = executor._pending_work_items + + super().__init__() + + def run(self): + # Main loop for the executor manager thread. + + while True: + # gh-109047: During Python finalization, self.call_queue.put() + # creation of a thread can fail with RuntimeError. 
+ try: + self.add_call_item_to_queue() + except BaseException as exc: + cause = format_exception(exc) + self.terminate_broken(cause) + return + + result_item, is_broken, cause = self.wait_result_broken_or_wakeup() + + if is_broken: + self.terminate_broken(cause) + return + if result_item is not None: + self.process_result_item(result_item) + + process_exited = result_item.exit_pid is not None + if process_exited: + p = self.processes.pop(result_item.exit_pid) + p.join() + + # Delete reference to result_item to avoid keeping references + # while waiting on new results. + del result_item + + if executor := self.executor_reference(): + if process_exited: + with self.shutdown_lock: + executor._adjust_process_count() + else: + executor._idle_worker_semaphore.release() + del executor + + if self.is_shutting_down(): + self.flag_executor_shutting_down() + + # When only canceled futures remain in pending_work_items, our + # next call to wait_result_broken_or_wakeup would hang forever. + # This makes sure we have some running futures or none at all. + self.add_call_item_to_queue() + + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. + if not self.pending_work_items: + self.join_executor_internals() + return + + def add_call_item_to_queue(self): + # Fills call_queue with _WorkItems from pending_work_items. + # This function never blocks. 
+ while True: + if self.call_queue.full(): + return + try: + work_id = self.work_ids_queue.get(block=False) + except queue.Empty: + return + else: + work_item = self.pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + self.call_queue.put(_CallItem(work_id, + work_item.fn, + work_item.args, + work_item.kwargs), + block=True) + else: + del self.pending_work_items[work_id] + continue + + def wait_result_broken_or_wakeup(self): + # Wait for a result to be ready in the result_queue while checking + # that all worker processes are still running, or for a wake up + # signal send. The wake up signals come either from new tasks being + # submitted, from the executor being shutdown/gc-ed, or from the + # shutdown of the python interpreter. + result_reader = self.result_queue._reader + assert not self.thread_wakeup._closed + wakeup_reader = self.thread_wakeup._reader + readers = [result_reader, wakeup_reader] + worker_sentinels = [p.sentinel for p in list(self.processes.values())] + ready = mp.connection.wait(readers + worker_sentinels) + + cause = None + is_broken = True + result_item = None + if result_reader in ready: + try: + result_item = result_reader.recv() + is_broken = False + except BaseException as exc: + cause = format_exception(exc) + + elif wakeup_reader in ready: + is_broken = False + + self.thread_wakeup.clear() + + return result_item, is_broken, cause + + def process_result_item(self, result_item): + # Process the received a result_item. This can be either the PID of a + # worker that exited gracefully or a _ResultItem + + # Received a _ResultItem so mark the future as completed. 
+ work_item = self.pending_work_items.pop(result_item.work_id, None) + # work_item can be None if another process terminated (see above) + if work_item is not None: + if result_item.exception is not None: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + + def is_shutting_down(self): + # Check whether we should start shutting down the executor. + executor = self.executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this worker has been collected OR + # - The executor that owns this worker has been shutdown. + return (_global_shutdown or executor is None + or executor._shutdown_thread) + + def _terminate_broken(self, cause): + # Terminate the executor because it is in a broken state. The cause + # argument can be used to display more information on the error that + # lead the executor into becoming broken. + + # Mark the process pool broken so that submits fail right now. + executor = self.executor_reference() + if executor is not None: + executor._broken = ('A child process terminated ' + 'abruptly, the process pool is not ' + 'usable anymore') + executor._shutdown_thread = True + executor = None + + # All pending tasks are to be marked failed with the following + # BrokenProcessPool error + bpe = BrokenProcessPool("A process in the process pool was " + "terminated abruptly while the future was " + "running or pending.") + if cause is not None: + bpe.__cause__ = _RemoteTraceback( + f"\n'''\n{''.join(cause)}'''") + + # Mark pending tasks as failed. + for work_id, work_item in self.pending_work_items.items(): + try: + work_item.future.set_exception(bpe) + except _base.InvalidStateError: + # set_exception() fails if the future is cancelled: ignore it. 
+ # Trying to check if the future is cancelled before calling + # set_exception() would leave a race condition if the future is + # cancelled between the check and set_exception(). + pass + # Delete references to object. See issue16284 + del work_item + self.pending_work_items.clear() + + # Terminate remaining workers forcibly: the queues or their + # locks may be in a dirty state and block forever. + for p in self.processes.values(): + p.terminate() + + self.call_queue._terminate_broken() + + # clean up resources + self._join_executor_internals(broken=True) + + def terminate_broken(self, cause): + with self.shutdown_lock: + self._terminate_broken(cause) + + def flag_executor_shutting_down(self): + # Flag the executor as shutting down and cancel remaining tasks if + # requested as early as possible if it is not gc-ed yet. + executor = self.executor_reference() + if executor is not None: + executor._shutdown_thread = True + # Cancel pending work items if requested. + if executor._cancel_pending_futures: + # Cancel all pending futures and update pending_work_items + # to only have futures that are currently running. + new_pending_work_items = {} + for work_id, work_item in self.pending_work_items.items(): + if not work_item.future.cancel(): + new_pending_work_items[work_id] = work_item + self.pending_work_items = new_pending_work_items + # Drain work_ids_queue since we no longer need to + # add items to the call queue. + while True: + try: + self.work_ids_queue.get_nowait() + except queue.Empty: + break + # Make sure we do this only once to not waste time looping + # on running processes over and over. + executor._cancel_pending_futures = False + + def shutdown_workers(self): + n_children_to_stop = self.get_n_children_alive() + n_sentinels_sent = 0 + # Send the right number of sentinels, to make sure all children are + # properly terminated. 
+ while (n_sentinels_sent < n_children_to_stop + and self.get_n_children_alive() > 0): + for i in range(n_children_to_stop - n_sentinels_sent): + try: + self.call_queue.put_nowait(None) + n_sentinels_sent += 1 + except queue.Full: + break + + def join_executor_internals(self): + with self.shutdown_lock: + self._join_executor_internals() + + def _join_executor_internals(self, broken=False): + # If broken, call_queue was closed and so can no longer be used. + if not broken: + self.shutdown_workers() + + # Release the queue's resources as soon as possible. + self.call_queue.close() + self.call_queue.join_thread() + self.thread_wakeup.close() + + # If .join() is not called on the created processes then + # some ctx.Queue methods may deadlock on Mac OS X. + for p in self.processes.values(): + if broken: + p.terminate() + p.join() + + def get_n_children_alive(self): + # This is an upper bound on the number of children alive. + return sum(p.is_alive() for p in self.processes.values()) + + +_system_limits_checked = False +_system_limited = None + + +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked: + if _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + import multiprocessing.synchronize # noqa: F401 + except ImportError: + _system_limited = ( + "This Python build lacks multiprocessing.synchronize, usually due " + "to named semaphores being unavailable on this platform." 
+ ) + raise NotImplementedError(_system_limited) + try: + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # indetermined limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = ("system provides too few semaphores (%d" + " available, 256 necessary)" % nsems_max) + raise NotImplementedError(_system_limited) + + +def _chain_from_iterable_of_lists(iterable): + """ + Specialized implementation of itertools.chain.from_iterable. + Each item in *iterable* should be a list. This function is + careful not to keep references to yielded objects. + """ + for element in iterable: + element.reverse() + while element: + yield element.pop() + + +class BrokenProcessPool(_base.BrokenExecutor): + """ + Raised when a process in a ProcessPoolExecutor terminated abruptly + while a future was in the running state. + """ + +_TERMINATE = "terminate" +_KILL = "kill" + +_SHUTDOWN_CALLBACK_OPERATION = { + _TERMINATE, + _KILL +} + + +class ProcessPoolExecutor(_base.Executor): + def __init__(self, max_workers=None, mp_context=None, + initializer=None, initargs=(), *, max_tasks_per_child=None): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: The maximum number of processes that can be used to + execute the given calls. If None or not given then as many + worker processes will be created as the machine has processors. + mp_context: A multiprocessing context to launch the workers created + using the multiprocessing.get_context('start method') API. This + object should provide SimpleQueue, Queue and Process. + initializer: A callable used to initialize worker processes. + initargs: A tuple of arguments to pass to the initializer. 
+ max_tasks_per_child: The maximum number of tasks a worker process + can complete before it will exit and be replaced with a fresh + worker process. The default of None means worker process will + live as long as the executor. Requires a non-'fork' mp_context + start method. When given, we default to using 'spawn' if no + mp_context is supplied. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = os.process_cpu_count() or 1 + if sys.platform == 'win32': + self._max_workers = min(_MAX_WINDOWS_WORKERS, + self._max_workers) + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + elif (sys.platform == 'win32' and + max_workers > _MAX_WINDOWS_WORKERS): + raise ValueError( + f"max_workers must be <= {_MAX_WINDOWS_WORKERS}") + + self._max_workers = max_workers + + if mp_context is None: + if max_tasks_per_child is not None: + mp_context = mp.get_context("spawn") + else: + mp_context = mp.get_context() + self._mp_context = mp_context + + # https://github.com/python/cpython/issues/90622 + self._safe_to_dynamically_spawn_children = ( + self._mp_context.get_start_method(allow_none=False) != "fork") + + if initializer is not None and not callable(initializer): + raise TypeError("initializer must be a callable") + self._initializer = initializer + self._initargs = initargs + + if max_tasks_per_child is not None: + if not isinstance(max_tasks_per_child, int): + raise TypeError("max_tasks_per_child must be an integer") + elif max_tasks_per_child <= 0: + raise ValueError("max_tasks_per_child must be >= 1") + if self._mp_context.get_start_method(allow_none=False) == "fork": + # https://github.com/python/cpython/issues/90622 + raise ValueError("max_tasks_per_child is incompatible with" + " the 'fork' multiprocessing start method;" + " supply a different mp_context.") + self._max_tasks_per_child = max_tasks_per_child + + # Management thread + self._executor_manager_thread = None + + # Map of pids to processes + 
self._processes = {} + + # Shutdown is a two-step process. + self._shutdown_thread = False + self._shutdown_lock = threading.Lock() + self._idle_worker_semaphore = threading.Semaphore(0) + self._broken = False + self._queue_count = 0 + self._pending_work_items = {} + self._cancel_pending_futures = False + + # _ThreadWakeup is a communication channel used to interrupt the wait + # of the main loop of executor_manager_thread from another thread (e.g. + # when calling executor.submit or executor.shutdown). We do not use the + # _result_queue to send wakeup signals to the executor_manager_thread + # as it could result in a deadlock if a worker process dies with the + # _result_queue write lock still acquired. + # + # Care must be taken to only call clear and close from the + # executor_manager_thread, since _ThreadWakeup.clear() is not protected + # by a lock. + self._executor_manager_thread_wakeup = _ThreadWakeup() + + # Create communication channels for the executor + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. + queue_size = self._max_workers + EXTRA_QUEUED_CALLS + self._call_queue = _SafeQueue( + max_size=queue_size, ctx=self._mp_context, + pending_work_items=self._pending_work_items, + thread_wakeup=self._executor_manager_thread_wakeup) + # Killed worker processes can produce spurious "broken pipe" + # tracebacks in the queue's own worker thread. But we detect killed + # processes anyway, so silence the tracebacks. + self._call_queue._ignore_epipe = True + self._result_queue = mp_context.SimpleQueue() + self._work_ids = queue.Queue() + + def _start_executor_manager_thread(self): + if self._executor_manager_thread is None: + # Start the processes so that their sentinels are known. + if not self._safe_to_dynamically_spawn_children: # ie, using fork. 
+ self._launch_processes() + self._executor_manager_thread = _ExecutorManagerThread(self) + self._executor_manager_thread.start() + _threads_wakeups[self._executor_manager_thread] = \ + self._executor_manager_thread_wakeup + + def _adjust_process_count(self): + # gh-132969: avoid error when state is reset and executor is still running, + # which will happen when shutdown(wait=False) is called. + if self._processes is None: + return + + # if there's an idle process, we don't need to spawn a new one. + if self._idle_worker_semaphore.acquire(blocking=False): + return + + process_count = len(self._processes) + if process_count < self._max_workers: + # Assertion disabled as this codepath is also used to replace a + # worker that unexpectedly dies, even when using the 'fork' start + # method. That means there is still a potential deadlock bug. If a + # 'fork' mp_context worker dies, we'll be forking a new one when + # we know a thread is running (self._executor_manager_thread). + #assert self._safe_to_dynamically_spawn_children or not self._executor_manager_thread, 'https://github.com/python/cpython/issues/90622' + self._spawn_process() + + def _launch_processes(self): + # https://github.com/python/cpython/issues/90622 + assert not self._executor_manager_thread, ( + 'Processes cannot be fork()ed after the thread has started, ' + 'deadlock in the child processes could result.') + for _ in range(len(self._processes), self._max_workers): + self._spawn_process() + + def _spawn_process(self): + p = self._mp_context.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue, + self._initializer, + self._initargs, + self._max_tasks_per_child)) + p.start() + self._processes[p.pid] = p + + def submit(self, fn, /, *args, **kwargs): + with self._shutdown_lock: + if self._broken: + raise BrokenProcessPool(self._broken) + if self._shutdown_thread: + raise RuntimeError('cannot schedule new futures after shutdown') + if _global_shutdown: + raise 
RuntimeError('cannot schedule new futures after ' + 'interpreter shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._executor_manager_thread_wakeup.wakeup() + + if self._safe_to_dynamically_spawn_children: + self._adjust_process_count() + self._start_executor_manager_thread() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def map(self, fn, *iterables, timeout=None, chunksize=1, buffersize=None): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: If greater than one, the iterables will be chopped into + chunks of size chunksize and submitted to the process pool. + If set to one, the items in the list will be sent one at a time. + buffersize: The number of submitted tasks whose results have not + yet been yielded. If the buffer is full, iteration over the + iterables pauses until a result is yielded from the buffer. + If None, all input elements are eagerly collected, and a task is + submitted for each. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. 
+ """ + if chunksize < 1: + raise ValueError("chunksize must be >= 1.") + + results = super().map(partial(_process_chunk, fn), + itertools.batched(zip(*iterables), chunksize), + timeout=timeout, + buffersize=buffersize) + return _chain_from_iterable_of_lists(results) + + def shutdown(self, wait=True, *, cancel_futures=False): + with self._shutdown_lock: + self._cancel_pending_futures = cancel_futures + self._shutdown_thread = True + if self._executor_manager_thread_wakeup is not None: + # Wake up queue management thread + self._executor_manager_thread_wakeup.wakeup() + + if self._executor_manager_thread is not None and wait: + self._executor_manager_thread.join() + # To reduce the risk of opening too many files, remove references to + # objects that use file descriptors. + self._executor_manager_thread = None + self._call_queue = None + if self._result_queue is not None and wait: + self._result_queue.close() + self._result_queue = None + self._processes = None + self._executor_manager_thread_wakeup = None + + shutdown.__doc__ = _base.Executor.shutdown.__doc__ + + def _force_shutdown(self, operation): + """Attempts to terminate or kill the executor's workers based off the + given operation. Iterates through all of the current processes and + performs the relevant task if the process is still alive. + + After terminating workers, the pool will be in a broken state + and no longer usable (for instance, new tasks should not be + submitted). + """ + if operation not in _SHUTDOWN_CALLBACK_OPERATION: + raise ValueError(f"Unsupported operation: {operation!r}") + + processes = {} + if self._processes: + processes = self._processes.copy() + + # shutdown will invalidate ._processes, so we copy it right before + # calling. If we waited here, we would deadlock if a process decides not + # to exit. 
+ self.shutdown(wait=False, cancel_futures=True) + + if not processes: + return + + for proc in processes.values(): + try: + if not proc.is_alive(): + continue + except ValueError: + # The process is already exited/closed out. + continue + + try: + if operation == _TERMINATE: + proc.terminate() + elif operation == _KILL: + proc.kill() + except ProcessLookupError: + # The process just ended before our signal + continue + + def terminate_workers(self): + """Attempts to terminate the executor's workers. + Iterates through all of the current worker processes and terminates + each one that is still alive. + + After terminating workers, the pool will be in a broken state + and no longer usable (for instance, new tasks should not be + submitted). + """ + return self._force_shutdown(operation=_TERMINATE) + + def kill_workers(self): + """Attempts to kill the executor's workers. + Iterates through all of the current worker processes and kills + each one that is still alive. + + After killing workers, the pool will be in a broken state + and no longer usable (for instance, new tasks should not be + submitted). + """ + return self._force_shutdown(operation=_KILL) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/thread.py b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/thread.py new file mode 100644 index 0000000..909359b --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/futures/thread.py @@ -0,0 +1,274 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ThreadPoolExecutor.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures import _base +import itertools +import queue +import threading +import types +import weakref +import os + + +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False +# Lock that ensures that new workers are not created while the interpreter is +# shutting down. 
Must be held while mutating _threads_queues and _shutdown. +_global_shutdown_lock = threading.Lock() + +def _python_exit(): + global _shutdown + with _global_shutdown_lock: + _shutdown = True + items = list(_threads_queues.items()) + for t, q in items: + q.put(None) + for t, q in items: + t.join() + +# Register for `_python_exit()` to be called just before joining all +# non-daemon threads. This is used instead of `atexit.register()` for +# compatibility with subinterpreters, which no longer support daemon threads. +# See bpo-39812 for context. +threading._register_atexit(_python_exit) + +# At fork, reinitialize the `_global_shutdown_lock` lock in the child process +if hasattr(os, 'register_at_fork'): + os.register_at_fork(before=_global_shutdown_lock.acquire, + after_in_child=_global_shutdown_lock._at_fork_reinit, + after_in_parent=_global_shutdown_lock.release) + os.register_at_fork(after_in_child=_threads_queues.clear) + + +class WorkerContext: + + @classmethod + def prepare(cls, initializer, initargs): + if initializer is not None: + if not callable(initializer): + raise TypeError("initializer must be a callable") + def create_context(): + return cls(initializer, initargs) + def resolve_task(fn, args, kwargs): + return (fn, args, kwargs) + return create_context, resolve_task + + def __init__(self, initializer, initargs): + self.initializer = initializer + self.initargs = initargs + + def initialize(self): + if self.initializer is not None: + self.initializer(*self.initargs) + + def finalize(self): + pass + + def run(self, task): + fn, args, kwargs = task + return fn(*args, **kwargs) + + +class _WorkItem: + def __init__(self, future, task): + self.future = future + self.task = task + + def run(self, ctx): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = ctx.run(self.task) + except BaseException as exc: + self.future.set_exception(exc) + # Break a reference cycle with the exception 'exc' + self = None + else: + 
self.future.set_result(result) + + __class_getitem__ = classmethod(types.GenericAlias) + + +def _worker(executor_reference, ctx, work_queue): + try: + ctx.initialize() + except BaseException: + _base.LOGGER.critical('Exception in initializer:', exc_info=True) + executor = executor_reference() + if executor is not None: + executor._initializer_failed() + return + try: + while True: + try: + work_item = work_queue.get_nowait() + except queue.Empty: + # attempt to increment idle count if queue is empty + executor = executor_reference() + if executor is not None: + executor._idle_semaphore.release() + del executor + work_item = work_queue.get(block=True) + + if work_item is not None: + work_item.run(ctx) + # Delete references to object. See GH-60488 + del work_item + continue + + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if _shutdown or executor is None or executor._shutdown: + # Flag the executor as shutting down as early as possible if it + # is not gc-ed yet. + if executor is not None: + executor._shutdown = True + # Notice other workers + work_queue.put(None) + return + del executor + except BaseException: + _base.LOGGER.critical('Exception in worker', exc_info=True) + finally: + ctx.finalize() + + +class BrokenThreadPool(_base.BrokenExecutor): + """ + Raised when a worker thread in a ThreadPoolExecutor failed initializing. + """ + + +class ThreadPoolExecutor(_base.Executor): + + BROKEN = BrokenThreadPool + + # Used to assign unique thread names when thread_name_prefix is not supplied. 
+ _counter = itertools.count().__next__ + + @classmethod + def prepare_context(cls, initializer, initargs): + return WorkerContext.prepare(initializer, initargs) + + def __init__(self, max_workers=None, thread_name_prefix='', + initializer=None, initargs=(), **ctxkwargs): + """Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + initializer: A callable used to initialize worker threads. + initargs: A tuple of arguments to pass to the initializer. + ctxkwargs: Additional arguments to cls.prepare_context(). + """ + if max_workers is None: + # ThreadPoolExecutor is often used to: + # * CPU bound task which releases GIL + # * I/O bound task (which releases GIL, of course) + # + # We use process_cpu_count + 4 for both types of tasks. + # But we limit it to 32 to avoid consuming surprisingly large resource + # on many core machine. 
+ max_workers = min(32, (os.process_cpu_count() or 1) + 4) + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + (self._create_worker_context, + self._resolve_work_item_task, + ) = type(self).prepare_context(initializer, initargs, **ctxkwargs) + + self._max_workers = max_workers + self._work_queue = queue.SimpleQueue() + self._idle_semaphore = threading.Semaphore(0) + self._threads = set() + self._broken = False + self._shutdown = False + self._shutdown_lock = threading.Lock() + self._thread_name_prefix = (thread_name_prefix or + ("ThreadPoolExecutor-%d" % self._counter())) + + def submit(self, fn, /, *args, **kwargs): + with self._shutdown_lock, _global_shutdown_lock: + if self._broken: + raise self.BROKEN(self._broken) + + if self._shutdown: + raise RuntimeError('cannot schedule new futures after shutdown') + if _shutdown: + raise RuntimeError('cannot schedule new futures after ' + 'interpreter shutdown') + + f = _base.Future() + task = self._resolve_work_item_task(fn, args, kwargs) + w = _WorkItem(f, task) + + self._work_queue.put(w) + self._adjust_thread_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def _adjust_thread_count(self): + # if idle threads are available, don't spin new threads + if self._idle_semaphore.acquire(timeout=0): + return + + # When the executor gets lost, the weakref callback will wake up + # the worker threads. 
+ def weakref_cb(_, q=self._work_queue): + q.put(None) + + num_threads = len(self._threads) + if num_threads < self._max_workers: + thread_name = '%s_%d' % (self._thread_name_prefix or self, + num_threads) + t = threading.Thread(name=thread_name, target=_worker, + args=(weakref.ref(self, weakref_cb), + self._create_worker_context(), + self._work_queue)) + t.start() + self._threads.add(t) + _threads_queues[t] = self._work_queue + + def _initializer_failed(self): + with self._shutdown_lock: + self._broken = ('A thread initializer failed, the thread pool ' + 'is not usable anymore') + # Drain work queue and mark pending futures failed + while True: + try: + work_item = self._work_queue.get_nowait() + except queue.Empty: + break + if work_item is not None: + work_item.future.set_exception(self.BROKEN(self._broken)) + + def shutdown(self, wait=True, *, cancel_futures=False): + with self._shutdown_lock: + self._shutdown = True + if cancel_futures: + # Drain all work items from the queue, and then cancel their + # associated futures. + while True: + try: + work_item = self._work_queue.get_nowait() + except queue.Empty: + break + if work_item is not None: + work_item.future.cancel() + + # Send a wake-up to prevent threads calling + # _work_queue.get(block=True) from permanently blocking. 
+ self._work_queue.put(None) + if wait: + for t in self._threads: + t.join() + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/interpreters/__init__.py b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/interpreters/__init__.py new file mode 100644 index 0000000..ea4147e --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/interpreters/__init__.py @@ -0,0 +1,247 @@ +"""Subinterpreters High Level Module.""" + +import threading +import weakref +import _interpreters + +# aliases: +from _interpreters import ( + InterpreterError, InterpreterNotFoundError, NotShareableError, + is_shareable, +) +from ._queues import ( + create as create_queue, + Queue, QueueEmpty, QueueFull, +) + + +__all__ = [ + 'get_current', 'get_main', 'create', 'list_all', 'is_shareable', + 'Interpreter', + 'InterpreterError', 'InterpreterNotFoundError', 'ExecutionFailed', + 'NotShareableError', + 'create_queue', 'Queue', 'QueueEmpty', 'QueueFull', +] + + +_EXEC_FAILURE_STR = """ +{superstr} + +Uncaught in the interpreter: + +{formatted} +""".strip() + +class ExecutionFailed(InterpreterError): + """An unhandled exception happened during execution. + + This is raised from Interpreter.exec() and Interpreter.call(). 
+ """ + + def __init__(self, excinfo): + msg = excinfo.formatted + if not msg: + if excinfo.type and excinfo.msg: + msg = f'{excinfo.type.__name__}: {excinfo.msg}' + else: + msg = excinfo.type.__name__ or excinfo.msg + super().__init__(msg) + self.excinfo = excinfo + + def __str__(self): + try: + formatted = self.excinfo.errdisplay + except Exception: + return super().__str__() + else: + return _EXEC_FAILURE_STR.format( + superstr=super().__str__(), + formatted=formatted, + ) + + +def create(): + """Return a new (idle) Python interpreter.""" + id = _interpreters.create(reqrefs=True) + return Interpreter(id, _ownsref=True) + + +def list_all(): + """Return all existing interpreters.""" + return [Interpreter(id, _whence=whence) + for id, whence in _interpreters.list_all(require_ready=True)] + + +def get_current(): + """Return the currently running interpreter.""" + id, whence = _interpreters.get_current() + return Interpreter(id, _whence=whence) + + +def get_main(): + """Return the main interpreter.""" + id, whence = _interpreters.get_main() + assert whence == _interpreters.WHENCE_RUNTIME, repr(whence) + return Interpreter(id, _whence=whence) + + +_known = weakref.WeakValueDictionary() + +class Interpreter: + """A single Python interpreter. + + Attributes: + + "id" - the unique process-global ID number for the interpreter + "whence" - indicates where the interpreter was created + + If the interpreter wasn't created by this module + then any method that modifies the interpreter will fail, + i.e. .close(), .prepare_main(), .exec(), and .call() + """ + + _WHENCE_TO_STR = { + _interpreters.WHENCE_UNKNOWN: 'unknown', + _interpreters.WHENCE_RUNTIME: 'runtime init', + _interpreters.WHENCE_LEGACY_CAPI: 'legacy C-API', + _interpreters.WHENCE_CAPI: 'C-API', + _interpreters.WHENCE_XI: 'cross-interpreter C-API', + _interpreters.WHENCE_STDLIB: '_interpreters module', + } + + def __new__(cls, id, /, _whence=None, _ownsref=None): + # There is only one instance for any given ID. 
+ if not isinstance(id, int): + raise TypeError(f'id must be an int, got {id!r}') + id = int(id) + if _whence is None: + if _ownsref: + _whence = _interpreters.WHENCE_STDLIB + else: + _whence = _interpreters.whence(id) + assert _whence in cls._WHENCE_TO_STR, repr(_whence) + if _ownsref is None: + _ownsref = (_whence == _interpreters.WHENCE_STDLIB) + try: + self = _known[id] + assert hasattr(self, '_ownsref') + except KeyError: + self = super().__new__(cls) + _known[id] = self + self._id = id + self._whence = _whence + self._ownsref = _ownsref + if _ownsref: + # This may raise InterpreterNotFoundError: + _interpreters.incref(id) + return self + + def __repr__(self): + return f'{type(self).__name__}({self.id})' + + def __hash__(self): + return hash(self._id) + + def __del__(self): + self._decref() + + # for pickling: + def __reduce__(self): + return (type(self), (self._id,)) + + # gh-135729: Globals might be destroyed by the time this is called, so we + # need to keep references ourself + def _decref(self, *, + InterpreterNotFoundError=InterpreterNotFoundError, + _interp_decref=_interpreters.decref, + ): + if not self._ownsref: + return + self._ownsref = False + try: + _interp_decref(self._id) + except InterpreterNotFoundError: + pass + + @property + def id(self): + return self._id + + @property + def whence(self): + return self._WHENCE_TO_STR[self._whence] + + def is_running(self): + """Return whether or not the identified interpreter is running.""" + return _interpreters.is_running(self._id) + + # Everything past here is available only to interpreters created by + # interpreters.create(). + + def close(self): + """Finalize and destroy the interpreter. + + Attempting to destroy the current interpreter results + in an InterpreterError. + """ + return _interpreters.destroy(self._id, restrict=True) + + def prepare_main(self, ns=None, /, **kwargs): + """Bind the given values into the interpreter's __main__. + + The values must be shareable. 
+ """ + ns = dict(ns, **kwargs) if ns is not None else kwargs + _interpreters.set___main___attrs(self._id, ns, restrict=True) + + def exec(self, code, /): + """Run the given source code in the interpreter. + + This is essentially the same as calling the builtin "exec" + with this interpreter, using the __dict__ of its __main__ + module as both globals and locals. + + There is no return value. + + If the code raises an unhandled exception then an ExecutionFailed + exception is raised, which summarizes the unhandled exception. + The actual exception is discarded because objects cannot be + shared between interpreters. + + This blocks the current Python thread until done. During + that time, the previous interpreter is allowed to run + in other threads. + """ + excinfo = _interpreters.exec(self._id, code, restrict=True) + if excinfo is not None: + raise ExecutionFailed(excinfo) + + def _call(self, callable, args, kwargs): + res, excinfo = _interpreters.call(self._id, callable, args, kwargs, restrict=True) + if excinfo is not None: + raise ExecutionFailed(excinfo) + return res + + def call(self, callable, /, *args, **kwargs): + """Call the object in the interpreter with given args/kwargs. + + Nearly all callables, args, kwargs, and return values are + supported. All "shareable" objects are supported, as are + "stateless" functions (meaning non-closures that do not use + any globals). This method will fall back to pickle. + + If the callable raises an exception then the error display + (including full traceback) is sent back between the interpreters + and an ExecutionFailed exception is raised, much like what + happens with Interpreter.exec(). + """ + return self._call(callable, args, kwargs) + + def call_in_thread(self, callable, /, *args, **kwargs): + """Return a new thread that calls the object in the interpreter. + + The return value and any raised exception are discarded. 
+ """ + t = threading.Thread(target=self._call, args=(callable, args, kwargs)) + t.start() + return t diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/interpreters/_crossinterp.py b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/interpreters/_crossinterp.py new file mode 100644 index 0000000..a5f46b2 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/interpreters/_crossinterp.py @@ -0,0 +1,107 @@ +"""Common code between queues and channels.""" + + +class ItemInterpreterDestroyed(Exception): + """Raised when trying to get an item whose interpreter was destroyed.""" + + +class classonly: + """A non-data descriptor that makes a value only visible on the class. + + This is like the "classmethod" builtin, but does not show up on + instances of the class. It may be used as a decorator. + """ + + def __init__(self, value): + self.value = value + self.getter = classmethod(value).__get__ + self.name = None + + def __set_name__(self, cls, name): + if self.name is not None: + raise TypeError('already used') + self.name = name + + def __get__(self, obj, cls): + if obj is not None: + raise AttributeError(self.name) + # called on the class + return self.getter(None, cls) + + +class UnboundItem: + """Represents a cross-interpreter item no longer bound to an interpreter. + + An item is unbound when the interpreter that added it to the + cross-interpreter container is destroyed. 
+ """ + + __slots__ = () + + @classonly + def singleton(cls, kind, module, name='UNBOUND'): + doc = cls.__doc__ + if doc: + doc = doc.replace( + 'cross-interpreter container', kind, + ).replace( + 'cross-interpreter', kind, + ) + subclass = type( + f'Unbound{kind.capitalize()}Item', + (cls,), + { + "_MODULE": module, + "_NAME": name, + "__doc__": doc, + }, + ) + return object.__new__(subclass) + + _MODULE = __name__ + _NAME = 'UNBOUND' + + def __new__(cls): + raise Exception(f'use {cls._MODULE}.{cls._NAME}') + + def __repr__(self): + return f'{self._MODULE}.{self._NAME}' +# return f'interpreters._queues.UNBOUND' + + +UNBOUND = object.__new__(UnboundItem) +UNBOUND_ERROR = object() +UNBOUND_REMOVE = object() + +_UNBOUND_CONSTANT_TO_FLAG = { + UNBOUND_REMOVE: 1, + UNBOUND_ERROR: 2, + UNBOUND: 3, +} +_UNBOUND_FLAG_TO_CONSTANT = {v: k + for k, v in _UNBOUND_CONSTANT_TO_FLAG.items()} + + +def serialize_unbound(unbound): + op = unbound + try: + flag = _UNBOUND_CONSTANT_TO_FLAG[op] + except KeyError: + raise NotImplementedError(f'unsupported unbound replacement op {op!r}') + return flag, + + +def resolve_unbound(flag, exctype_destroyed): + try: + op = _UNBOUND_FLAG_TO_CONSTANT[flag] + except KeyError: + raise NotImplementedError(f'unsupported unbound replacement op {flag!r}') + if op is UNBOUND_REMOVE: + # "remove" not possible here + raise NotImplementedError + elif op is UNBOUND_ERROR: + raise exctype_destroyed("item's original interpreter destroyed") + elif op is UNBOUND: + return UNBOUND + else: + raise NotImplementedError(repr(op)) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/interpreters/_queues.py b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/interpreters/_queues.py new file mode 100644 index 0000000..b5cc0b8 --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/concurrent/interpreters/_queues.py @@ -0,0 +1,288 @@ +"""Cross-interpreter Queues High Level Module.""" + +import queue +import time +import weakref +import _interpqueues as _queues 
+from . import _crossinterp + +# aliases: +from _interpqueues import ( + QueueError, QueueNotFoundError, +) +from ._crossinterp import ( + UNBOUND_ERROR, UNBOUND_REMOVE, +) + +__all__ = [ + 'UNBOUND', 'UNBOUND_ERROR', 'UNBOUND_REMOVE', + 'create', 'list_all', + 'Queue', + 'QueueError', 'QueueNotFoundError', 'QueueEmpty', 'QueueFull', + 'ItemInterpreterDestroyed', +] + + +class QueueEmpty(QueueError, queue.Empty): + """Raised from get_nowait() when the queue is empty. + + It is also raised from get() if it times out. + """ + + +class QueueFull(QueueError, queue.Full): + """Raised from put_nowait() when the queue is full. + + It is also raised from put() if it times out. + """ + + +class ItemInterpreterDestroyed(QueueError, + _crossinterp.ItemInterpreterDestroyed): + """Raised from get() and get_nowait().""" + + +_SHARED_ONLY = 0 +_PICKLED = 1 + + +UNBOUND = _crossinterp.UnboundItem.singleton('queue', __name__) + + +def _serialize_unbound(unbound): + if unbound is UNBOUND: + unbound = _crossinterp.UNBOUND + return _crossinterp.serialize_unbound(unbound) + + +def _resolve_unbound(flag): + resolved = _crossinterp.resolve_unbound(flag, ItemInterpreterDestroyed) + if resolved is _crossinterp.UNBOUND: + resolved = UNBOUND + return resolved + + +def create(maxsize=0, *, unbounditems=UNBOUND): + """Return a new cross-interpreter queue. + + The queue may be used to pass data safely between interpreters. + + "unbounditems" sets the default for Queue.put(); see that method for + supported values. The default value is UNBOUND, which replaces + the unbound item. 
+ """ + unbound = _serialize_unbound(unbounditems) + unboundop, = unbound + qid = _queues.create(maxsize, unboundop, -1) + self = Queue(qid) + self._set_unbound(unboundop, unbounditems) + return self + + +def list_all(): + """Return a list of all open queues.""" + queues = [] + for qid, unboundop, _ in _queues.list_all(): + self = Queue(qid) + if not hasattr(self, '_unbound'): + self._set_unbound(unboundop) + else: + assert self._unbound[0] == unboundop + queues.append(self) + return queues + + +_known_queues = weakref.WeakValueDictionary() + +class Queue: + """A cross-interpreter queue.""" + + def __new__(cls, id, /): + # There is only one instance for any given ID. + if isinstance(id, int): + id = int(id) + else: + raise TypeError(f'id must be an int, got {id!r}') + try: + self = _known_queues[id] + except KeyError: + self = super().__new__(cls) + self._id = id + _known_queues[id] = self + _queues.bind(id) + return self + + def __del__(self): + try: + _queues.release(self._id) + except QueueNotFoundError: + pass + try: + del _known_queues[self._id] + except KeyError: + pass + + def __repr__(self): + return f'{type(self).__name__}({self.id})' + + def __hash__(self): + return hash(self._id) + + # for pickling: + def __reduce__(self): + return (type(self), (self._id,)) + + def _set_unbound(self, op, items=None): + assert not hasattr(self, '_unbound') + if items is None: + items = _resolve_unbound(op) + unbound = (op, items) + self._unbound = unbound + return unbound + + @property + def id(self): + return self._id + + @property + def unbounditems(self): + try: + _, items = self._unbound + except AttributeError: + op, _ = _queues.get_queue_defaults(self._id) + _, items = self._set_unbound(op) + return items + + @property + def maxsize(self): + try: + return self._maxsize + except AttributeError: + self._maxsize = _queues.get_maxsize(self._id) + return self._maxsize + + def empty(self): + return self.qsize() == 0 + + def full(self): + return _queues.is_full(self._id) + 
+ def qsize(self): + return _queues.get_count(self._id) + + def put(self, obj, block=True, timeout=None, *, + unbounditems=None, + _delay=10 / 1000, # 10 milliseconds + ): + """Add the object to the queue. + + If "block" is true, this blocks while the queue is full. + + For most objects, the object received through Queue.get() will + be a new one, equivalent to the original and not sharing any + actual underlying data. The notable exceptions include + cross-interpreter types (like Queue) and memoryview, where the + underlying data is actually shared. Furthermore, some types + can be sent through a queue more efficiently than others. This + group includes various immutable types like int, str, bytes, and + tuple (if the items are likewise efficiently shareable). See interpreters.is_shareable(). + + "unbounditems" controls the behavior of Queue.get() for the given + object if the current interpreter (calling put()) is later + destroyed. + + If "unbounditems" is None (the default) then it uses the + queue's default, set with create_queue(), + which is usually UNBOUND. + + If "unbounditems" is UNBOUND_ERROR then get() will raise an + ItemInterpreterDestroyed exception if the original interpreter + has been destroyed. This does not otherwise affect the queue; + the next call to put() will work like normal, returning the next + item in the queue. + + If "unbounditems" is UNBOUND_REMOVE then the item will be removed + from the queue as soon as the original interpreter is destroyed. + Be aware that this will introduce an imbalance between put() + and get() calls. + + If "unbounditems" is UNBOUND then it is returned by get() in place + of the unbound item. 
+ """ + if not block: + return self.put_nowait(obj, unbounditems=unbounditems) + if unbounditems is None: + unboundop = -1 + else: + unboundop, = _serialize_unbound(unbounditems) + if timeout is not None: + timeout = int(timeout) + if timeout < 0: + raise ValueError(f'timeout value must be non-negative') + end = time.time() + timeout + while True: + try: + _queues.put(self._id, obj, unboundop) + except QueueFull as exc: + if timeout is not None and time.time() >= end: + raise # re-raise + time.sleep(_delay) + else: + break + + def put_nowait(self, obj, *, unbounditems=None): + if unbounditems is None: + unboundop = -1 + else: + unboundop, = _serialize_unbound(unbounditems) + _queues.put(self._id, obj, unboundop) + + def get(self, block=True, timeout=None, *, + _delay=10 / 1000, # 10 milliseconds + ): + """Return the next object from the queue. + + If "block" is true, this blocks while the queue is empty. + + If the next item's original interpreter has been destroyed + then the "next object" is determined by the value of the + "unbounditems" argument to put(). + """ + if not block: + return self.get_nowait() + if timeout is not None: + timeout = int(timeout) + if timeout < 0: + raise ValueError(f'timeout value must be non-negative') + end = time.time() + timeout + while True: + try: + obj, unboundop = _queues.get(self._id) + except QueueEmpty as exc: + if timeout is not None and time.time() >= end: + raise # re-raise + time.sleep(_delay) + else: + break + if unboundop is not None: + assert obj is None, repr(obj) + return _resolve_unbound(unboundop) + return obj + + def get_nowait(self): + """Return the next object from the channel. + + If the queue is empty then raise QueueEmpty. Otherwise this + is the same as get(). 
+ """ + try: + obj, unboundop = _queues.get(self._id) + except QueueEmpty as exc: + raise # re-raise + if unboundop is not None: + assert obj is None, repr(obj) + return _resolve_unbound(unboundop) + return obj + + +_queues._register_heap_types(Queue, QueueEmpty, QueueFull) diff --git a/src/_nfdos/rootfs/usr/lib/python3.13/configparser.py b/src/_nfdos/rootfs/usr/lib/python3.13/configparser.py new file mode 100644 index 0000000..18af1ea --- /dev/null +++ b/src/_nfdos/rootfs/usr/lib/python3.13/configparser.py @@ -0,0 +1,1414 @@ +"""Configuration file parser. + +A configuration file consists of sections, lead by a "[section]" header, +and followed by "name: value" entries, with continuations and such in +the style of RFC 822. + +Intrinsic defaults can be specified by passing them into the +ConfigParser constructor as a dictionary. + +class: + +ConfigParser -- responsible for parsing a list of + configuration files, and managing the parsed database. + + methods: + + __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, + delimiters=('=', ':'), comment_prefixes=('#', ';'), + inline_comment_prefixes=None, strict=True, + empty_lines_in_values=True, default_section='DEFAULT', + interpolation=, converters=, + allow_unnamed_section=False): + Create the parser. When `defaults` is given, it is initialized into the + dictionary or intrinsic defaults. The keys must be strings, the values + must be appropriate for %()s string interpolation. + + When `dict_type` is given, it will be used to create the dictionary + objects for the list of sections, for the options within a section, and + for the default values. + + When `delimiters` is given, it will be used as the set of substrings + that divide keys from values. + + When `comment_prefixes` is given, it will be used as the set of + substrings that prefix comments in empty lines. Comments can be + indented. 
+ + When `inline_comment_prefixes` is given, it will be used as the set of + substrings that prefix comments in non-empty lines. + + When `strict` is True, the parser won't allow for any section or option + duplicates while reading from a single source (file, string or + dictionary). Default is True. + + When `empty_lines_in_values` is False (default: True), each empty line + marks the end of an option. Otherwise, internal empty lines of + a multiline option are kept as part of the value. + + When `allow_no_value` is True (default: False), options without + values are accepted; the value presented for these is None. + + When `default_section` is given, the name of the special section is + named accordingly. By default it is called ``"DEFAULT"`` but this can + be customized to point to any other valid section name. Its current + value can be retrieved using the ``parser_instance.default_section`` + attribute and may be modified at runtime. + + When `interpolation` is given, it should be an Interpolation subclass + instance. It will be used as the handler for option value + pre-processing when using getters. RawConfigParser objects don't do + any sort of interpolation, whereas ConfigParser uses an instance of + BasicInterpolation. The library also provides a ``zc.buildout`` + inspired ExtendedInterpolation implementation. + + When `converters` is given, it should be a dictionary where each key + represents the name of a type converter and each value is a callable + implementing the conversion from string to the desired datatype. Every + converter gets its corresponding get*() method on the parser object and + section proxies. + + When `allow_unnamed_section` is True (default: False), options + without section are accepted: the section for these is + ``configparser.UNNAMED_SECTION``. + + sections() + Return all the configuration section names, sans DEFAULT. + + has_section(section) + Return whether the given section exists. 
+ + has_option(section, option) + Return whether the given option exists in the given section. + + options(section) + Return list of configuration options for the named section. + + read(filenames, encoding=None) + Read and parse the iterable of named configuration files, given by + name. A single filename is also allowed. Non-existing files + are ignored. Return list of successfully read files. + + read_file(f, filename=None) + Read and parse one configuration file, given as a file object. + The filename defaults to f.name; it is only used in error + messages (if f has no `name` attribute, the string `` is used). + + read_string(string) + Read configuration from a given string. + + read_dict(dictionary) + Read configuration from a dictionary. Keys are section names, + values are dictionaries with keys and values that should be present + in the section. If the used dictionary type preserves order, sections + and their keys will be added in order. Values are automatically + converted to strings. + + get(section, option, raw=False, vars=None, fallback=_UNSET) + Return a string value for the named option. All % interpolations are + expanded in the return values, based on the defaults passed into the + constructor and the DEFAULT section. Additional substitutions may be + provided using the `vars` argument, which must be a dictionary whose + contents override any pre-existing defaults. If `option` is a key in + `vars`, the value from `vars` is used. + + getint(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to an integer. + + getfloat(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a float. + + getboolean(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a boolean (currently case + insensitively defined as 0, false, no, off for False, and 1, true, + yes, on for True). Returns False or True. 
+ + items(section=_UNSET, raw=False, vars=None) + If section is given, return a list of tuples with (name, value) for + each option in the section. Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. + + remove_section(section) + Remove the given file section and all its options. + + remove_option(section, option) + Remove the given option from the given section. + + set(section, option, value) + Set the given option. + + write(fp, space_around_delimiters=True) + Write the configuration state in .ini format. If + `space_around_delimiters` is True (the default), delimiters + between keys and values are surrounded by spaces. +""" + +# Do not import dataclasses; overhead is unacceptable (gh-117703) + +from collections.abc import Iterable, MutableMapping +from collections import ChainMap as _ChainMap +import contextlib +import functools +import io +import itertools +import os +import re +import sys + +__all__ = ("NoSectionError", "DuplicateOptionError", "DuplicateSectionError", + "NoOptionError", "InterpolationError", "InterpolationDepthError", + "InterpolationMissingOptionError", "InterpolationSyntaxError", + "ParsingError", "MissingSectionHeaderError", + "MultilineContinuationError", "UnnamedSectionDisabledError", + "InvalidWriteError", "ConfigParser", "RawConfigParser", + "Interpolation", "BasicInterpolation", "ExtendedInterpolation", + "SectionProxy", "ConverterMapping", + "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH", "UNNAMED_SECTION") + +_default_dict = dict +DEFAULTSECT = "DEFAULT" + +MAX_INTERPOLATION_DEPTH = 10 + + + +# exception classes +class Error(Exception): + """Base class for ConfigParser exceptions.""" + + def __init__(self, msg=''): + self.message = msg + Exception.__init__(self, msg) + + def __repr__(self): + return self.message + + __str__ = __repr__ + + +class NoSectionError(Error): + """Raised when no section matches a requested option.""" + + def __init__(self, section): + 
Error.__init__(self, 'No section: %r' % (section,)) + self.section = section + self.args = (section, ) + + +class DuplicateSectionError(Error): + """Raised when a section is repeated in an input source. + + Possible repetitions that raise this exception are: multiple creation + using the API or in strict parsers when a section is found more than once + in a single input file, string or dictionary. + """ + + def __init__(self, section, source=None, lineno=None): + msg = [repr(section), " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": section ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Section ") + Error.__init__(self, "".join(msg)) + self.section = section + self.source = source + self.lineno = lineno + self.args = (section, source, lineno) + + +class DuplicateOptionError(Error): + """Raised by strict parsers when an option is repeated in an input source. + + Current implementation raises this exception only when an option is found + more than once in a single file, string or dictionary. 
+ """ + + def __init__(self, section, option, source=None, lineno=None): + msg = [repr(option), " in section ", repr(section), + " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": option ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Option ") + Error.__init__(self, "".join(msg)) + self.section = section + self.option = option + self.source = source + self.lineno = lineno + self.args = (section, option, source, lineno) + + +class NoOptionError(Error): + """A requested option was not found.""" + + def __init__(self, option, section): + Error.__init__(self, "No option %r in section: %r" % + (option, section)) + self.option = option + self.section = section + self.args = (option, section) + + +class InterpolationError(Error): + """Base class for interpolation-related exceptions.""" + + def __init__(self, option, section, msg): + Error.__init__(self, msg) + self.option = option + self.section = section + self.args = (option, section, msg) + + +class InterpolationMissingOptionError(InterpolationError): + """A string substitution required a setting which was not available.""" + + def __init__(self, option, section, rawval, reference): + msg = ("Bad value substitution: option {!r} in section {!r} contains " + "an interpolation key {!r} which is not a valid option name. " + "Raw value: {!r}".format(option, section, reference, rawval)) + InterpolationError.__init__(self, option, section, msg) + self.reference = reference + self.args = (option, section, rawval, reference) + + +class InterpolationSyntaxError(InterpolationError): + """Raised when the source text contains invalid syntax. + + Current implementation raises this exception when the source text into + which substitutions are made does not conform to the required syntax. 
+ """ + + +class InterpolationDepthError(InterpolationError): + """Raised when substitutions are nested too deeply.""" + + def __init__(self, option, section, rawval): + msg = ("Recursion limit exceeded in value substitution: option {!r} " + "in section {!r} contains an interpolation key which " + "cannot be substituted in {} steps. Raw value: {!r}" + "".format(option, section, MAX_INTERPOLATION_DEPTH, + rawval)) + InterpolationError.__init__(self, option, section, msg) + self.args = (option, section, rawval) + + +class ParsingError(Error): + """Raised when a configuration file does not follow legal syntax.""" + + def __init__(self, source, *args): + super().__init__(f'Source contains parsing errors: {source!r}') + self.source = source + self.errors = [] + self.args = (source, ) + if args: + self.append(*args) + + def append(self, lineno, line): + self.errors.append((lineno, line)) + self.message += '\n\t[line %2d]: %s' % (lineno, repr(line)) + + def combine(self, others): + for other in others: + for error in other.errors: + self.append(*error) + return self + + @staticmethod + def _raise_all(exceptions: Iterable['ParsingError']): + """ + Combine any number of ParsingErrors into one and raise it. 
+ """ + exceptions = iter(exceptions) + with contextlib.suppress(StopIteration): + raise next(exceptions).combine(exceptions) + + + +class MissingSectionHeaderError(ParsingError): + """Raised when a key-value pair is found before any section header.""" + + def __init__(self, filename, lineno, line): + Error.__init__( + self, + 'File contains no section headers.\nfile: %r, line: %d\n%r' % + (filename, lineno, line)) + self.source = filename + self.lineno = lineno + self.line = line + self.args = (filename, lineno, line) + + +class MultilineContinuationError(ParsingError): + """Raised when a key without value is followed by continuation line""" + def __init__(self, filename, lineno, line): + Error.__init__( + self, + "Key without value continued with an indented line.\n" + "file: %r, line: %d\n%r" + %(filename, lineno, line)) + self.source = filename + self.lineno = lineno + self.line = line + self.args = (filename, lineno, line) + + +class UnnamedSectionDisabledError(Error): + """Raised when an attempt to use UNNAMED_SECTION is made with the + feature disabled.""" + def __init__(self): + Error.__init__(self, "Support for UNNAMED_SECTION is disabled.") + + +class _UnnamedSection: + + def __repr__(self): + return "" + +class InvalidWriteError(Error): + """Raised when attempting to write data that the parser would read back differently. + ex: writing a key which begins with the section header pattern would read back as a + new section """ + + def __init__(self, msg=''): + Error.__init__(self, msg) + + +UNNAMED_SECTION = _UnnamedSection() + + +# Used in parser getters to indicate the default behaviour when a specific +# option is not found it to raise an exception. Created to enable `None` as +# a valid fallback value. 
+_UNSET = object() + + +class Interpolation: + """Dummy interpolation that passes the value through with no changes.""" + + def before_get(self, parser, section, option, value, defaults): + return value + + def before_set(self, parser, section, option, value): + return value + + def before_read(self, parser, section, option, value): + return value + + def before_write(self, parser, section, option, value): + return value + + +class BasicInterpolation(Interpolation): + """Interpolation as implemented in the classic ConfigParser. + + The option values can contain format strings which refer to other values in + the same section, or values in the special default section. + + For example: + + something: %(dir)s/whatever + + would resolve the "%(dir)s" to the value of dir. All reference + expansions are done late, on demand. If a user needs to use a bare % in + a configuration file, she can escape it by writing %%. Other % usage + is considered a user error and raises `InterpolationSyntaxError`.""" + + _KEYCRE = re.compile(r"%\(([^)]+)\)s") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('%%', '') # escaped percent signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '%' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('%'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("%") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "%": + accum.append("%") 
+ rest = rest[2:] + elif c == "(": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + var = parser.optionxform(m.group(1)) + rest = rest[m.end():] + try: + v = map[var] + except KeyError: + raise InterpolationMissingOptionError( + option, section, rawval, var) from None + if "%" in v: + self._interpolate_some(parser, option, accum, v, + section, map, depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'%%' must be followed by '%%' or '(', " + "found: %r" % (rest,)) + + +class ExtendedInterpolation(Interpolation): + """Advanced variant of interpolation, supports the syntax used by + `zc.buildout`. Enables interpolation between sections.""" + + _KEYCRE = re.compile(r"\$\{([^}]+)\}") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('$$', '') # escaped dollar signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '$' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('$'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("$") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "$": + accum.append("$") + rest = rest[2:] + elif c == "{": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + path = 
m.group(1).split(':') + rest = rest[m.end():] + sect = section + opt = option + try: + if len(path) == 1: + opt = parser.optionxform(path[0]) + v = map[opt] + elif len(path) == 2: + sect = path[0] + opt = parser.optionxform(path[1]) + v = parser.get(sect, opt, raw=True) + else: + raise InterpolationSyntaxError( + option, section, + "More than one ':' found: %r" % (rest,)) + except (KeyError, NoSectionError, NoOptionError): + raise InterpolationMissingOptionError( + option, section, rawval, ":".join(path)) from None + if v is None: + continue + if "$" in v: + self._interpolate_some(parser, opt, accum, v, sect, + dict(parser.items(sect, raw=True)), + depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'$' must be followed by '$' or '{', " + "found: %r" % (rest,)) + + +class _ReadState: + elements_added : set[str] + cursect : dict[str, str] | None = None + sectname : str | None = None + optname : str | None = None + lineno : int = 0 + indent_level : int = 0 + errors : list[ParsingError] + + def __init__(self): + self.elements_added = set() + self.errors = list() + + +class _Line(str): + __slots__ = 'clean', 'has_comments' + + def __new__(cls, val, *args, **kwargs): + return super().__new__(cls, val) + + def __init__(self, val, comments): + trimmed = val.strip() + self.clean = comments.strip(trimmed) + self.has_comments = trimmed != self.clean + + +class _CommentSpec: + def __init__(self, full_prefixes, inline_prefixes): + full_patterns = ( + # prefix at the beginning of a line + fr'^({re.escape(prefix)}).*' + for prefix in full_prefixes + ) + inline_patterns = ( + # prefix at the beginning of the line or following a space + fr'(^|\s)({re.escape(prefix)}.*)' + for prefix in inline_prefixes + ) + self.pattern = re.compile('|'.join(itertools.chain(full_patterns, inline_patterns))) + + def strip(self, text): + return self.pattern.sub('', text).rstrip() + + def wrap(self, text): + return _Line(text, self) + + +class 
RawConfigParser(MutableMapping): + """ConfigParser that does not do interpolation.""" + + # Regular expressions for parsing section headers and options + _SECT_TMPL = r""" + \[ # [ + (?P
.+) # very permissive! + \] # ] + """ + _OPT_TMPL = r""" + (?P