diff --git a/main/rust/APKBUILD b/main/rust/APKBUILD
index 8f86656dd6d298cd619bcc387d6fa53e254004a3..9c5103bf32eb16f6760a62cbae543914cb5357db 100644
--- a/main/rust/APKBUILD
+++ b/main/rust/APKBUILD
@@ -5,7 +5,7 @@
 # Contributor: Ariadne Conill <ariadne@dereferenced.org>
 # Maintainer:
 pkgname=rust
-pkgver=1.72.1
+pkgver=1.73.0
 _llvmver=16
 _bootver=1.70.0
 pkgrel=0
@@ -95,6 +95,7 @@ source="https://static.rust-lang.org/dist/rustc-$pkgver-src.tar.xz
 	need-rpath.patch
 	need-ssp_nonshared.patch
 	no-export-ld-library-path.patch
+	revert-pr-114382.patch
 	system-wasm-ld.patch
 	check-rustc
 	"
@@ -154,16 +155,13 @@ prepare() {
 	_clear_vendor_checksums libc
 	_clear_vendor_checksums libc-0.2.138
 	_clear_vendor_checksums libc-0.2.140
-	_clear_vendor_checksums libc-0.2.141
 	_clear_vendor_checksums libc-0.2.146
 
 	patch -Np1 -d vendor/libc-0.2.138 < "$srcdir"/lfs64-libc.patch.noauto
 	patch -Np1 -d vendor/libc-0.2.140 < "$srcdir"/lfs64-libc.patch.noauto
-	patch -Np1 -d vendor/libc-0.2.141 < "$srcdir"/lfs64-libc.patch.noauto
 
 	patch -Np1 -d vendor/libc-0.2.138 < "$srcdir"/libc-s390x-olargefile.patch.noauto
 	patch -Np1 -d vendor/libc-0.2.140 < "$srcdir"/libc-s390x-olargefile.patch.noauto
-	patch -Np1 -d vendor/libc-0.2.141 < "$srcdir"/libc-s390x-olargefile.patch.noauto
 }
 
 build() {
@@ -410,7 +408,6 @@ cargo() {
 	provides="cargo-bootstrap=$pkgver-r$pkgrel"
 
 	amove usr/bin/cargo
-	amove usr/libexec/cargo-credential-1password
 }
 
 rustfmt() {
@@ -457,12 +454,12 @@ _mv() {
 }
 
 sha512sums="
-08232b5bf36f82a995d67f3d03d5e35b7d8914d31fb4491d4c37b72a830bc438e9d18d9e138d398b1b6ae4aa09f7f8e1e9b68da6273ab74bdae4c6123586a21b  rustc-1.72.1-src.tar.xz
+75c59680a82cb9d076b9434744a1c65908524ef769293952f5d9c5779d9a9c6fa4d9aa0c7e7d6b7566a21a50a27cd6ae452b5283a4d4606b2fa1acc24dfd8e0c  rustc-1.73.0-src.tar.xz
 9de9d7ad05584e3cafddedcac409a4605a1d67e82260220deb3efd965603ee3148062b081c0cd736e50d82daad3a723fb6a7493fdf4dddfe36288645425a7946  alpine-move-py-scripts-to-share.patch
 a97bc511a8fd5f71ea7095efa4cf97048badc4ba46d1bde1b7d5ede2879f099157b8b6f5a03278c9f5766d809ea5d6a1affadce7ceb47d0cc134fde8bc70d51d  alpine-target.patch
 7af23c34096f7a03c5a1b40aa07e00dd6193743c872881018a9baa900f240c6e44918e481ab5ed7787a70ef361e4aeec2a7719bcd695f52988262e23271b44e5  do-not-install-libunwind-source.patch
 92916225c21742941aaf415d75d330fe2baf77a8226c076e862f4413068d5dee7b08c4b3182f69957ea9a51a0dcde145031565289015b6e3c31894fb396e4eb1  gcc-eh-libunwind.patch
-2df372bbf9b8c58c2e2ff69cb48ef8b2046710b7f0762e3bf64443298a5973d463227881247aa7a5c31b39e689035ae6283d5f3a209c8dd5d8a39aa0ab3e9a9a  install-template-shebang.patch
+c9d6008612c693536a4064545c02fe5261f10ee4021f856d058f6235cb80931eb8cec97f75d0f7075aa9333c8fbb4fd715d5d53be9e2e1e77db7365de8da717e  install-template-shebang.patch
 8d7236bfe583915be81c28b8b6296d69a7dda0246bd0d294298659825fbffcb4fa21c5ad01c351d358f269a620f3c59b9e7be77cd73a5214cc32ba15efa2fb0d  lfs64-libc.patch.noauto
 6a15ea16acaa15a57e8c0f6ae4ccf3e91c6f395249fb2186766642131ee5d37ecf04ade829e1712dce77684b77a54caa6b4356416dd1d3a6832187332531381f  lfs64-rust.patch
 aa81a1202a0ab45cb3e52c6caa9acf1fae592affaa925cff79601a65396a81f1e40715872c0d19a12edbf841282b6f263e986b566e3860d7e78ffa9bf49b8bc5  libc-s390x-olargefile.patch.noauto
@@ -470,6 +467,7 @@ aa81a1202a0ab45cb3e52c6caa9acf1fae592affaa925cff79601a65396a81f1e40715872c0d19a1
 2d401a01d2c2db92624d672018342907a41aa4a3842708c0a2ab694af0396d568aca4835d1075f244d77c0a4c45e67f893fd175fd97cd7f758090eba9972eba6  need-rpath.patch
 bceb500942913d1a01a764dc7ee35437e83b9aec3f945c9186396264df240f465e5ac4d1f70e7252ca4ec346ffaf7c79747ba1a53ff4123cbd408cfa5effa214  need-ssp_nonshared.patch
 cbdb38a2701e079d362e76ff16a2549230f80bcb4dfd3561d240df331fb21d202cb720e18b97fe9b166db1f79b46fab949ce5de5e37af2ed78a10c7fe1dfcfc9  no-export-ld-library-path.patch
+7875edfa5b51c32eac05d8958067c07fe203d6f09a199cb09d8b63da83214d60aef7a2191ac535d96431c4a9a82dd66f0b069ff68f7b2a941c736174470ea1fb  revert-pr-114382.patch
 4ea59b28fa238b8bb0cb24847e5884b65beec5168e55fe41a1ff50e272f1cf49186ffaa71d133cebfdcb70901ea18b477d7618a1dd330ff6c08bed5af0e2bb78  system-wasm-ld.patch
 c31fdfe8a9b3411576c75da46645cf0465b9053000a2ab49cf9b2f2733f679d6d33acbf236d67a20e14935d094a685453b7f1840180249f39d610fd0902c3125  check-rustc
 "
diff --git a/main/rust/install-template-shebang.patch b/main/rust/install-template-shebang.patch
index 130e8037a246689383961bb94d24bdb8f747070f..89cc16eb81dea73b0fbf2abffddd160b760a68b5 100644
--- a/main/rust/install-template-shebang.patch
+++ b/main/rust/install-template-shebang.patch
@@ -3,8 +3,8 @@ The script seems to be POSIX-sh (+ local) compatible.
 --- a/src/tools/rust-installer/install-template.sh
 +++ b/src/tools/rust-installer/install-template.sh
 @@ -1,4 +1,4 @@
--#!/bin/bash
-+#!/bin/sh
+-#!/usr/bin/env bash
++#!/usr/bin/env sh
  
  # No undefined variables
  set -u
diff --git a/main/rust/revert-pr-114382.patch b/main/rust/revert-pr-114382.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a6636c45acfb66a17715fb703b9194b6ecdf5348
--- /dev/null
+++ b/main/rust/revert-pr-114382.patch
@@ -0,0 +1,255 @@
+This reverts https://github.com/rust-lang/rust/pull/114382,
+which allows Rust 1.73.0 to build successfully on 32-bit ARM.
+
+
+--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
++++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+@@ -1155,20 +1155,6 @@
+             ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
+         }
+ 
+-        sym::compare_bytes => {
+-            intrinsic_args!(fx, args => (lhs_ptr, rhs_ptr, bytes_val); intrinsic);
+-            let lhs_ptr = lhs_ptr.load_scalar(fx);
+-            let rhs_ptr = rhs_ptr.load_scalar(fx);
+-            let bytes_val = bytes_val.load_scalar(fx);
+-
+-            let params = vec![AbiParam::new(fx.pointer_type); 3];
+-            let returns = vec![AbiParam::new(types::I32)];
+-            let args = &[lhs_ptr, rhs_ptr, bytes_val];
+-            // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+-            let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+-            ret.write_cvalue(fx, CValue::by_val(cmp, ret.layout()));
+-        }
+-
+         sym::const_allocate => {
+             intrinsic_args!(fx, args => (_size, _align); intrinsic);
+ 
+--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
++++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+@@ -302,21 +302,6 @@
+                     }
+                 }
+ 
+-                sym::compare_bytes => {
+-                    let a = args[0].immediate();
+-                    let b = args[1].immediate();
+-                    let n = args[2].immediate();
+-
+-                    let void_ptr_type = self.context.new_type::<*const ()>();
+-                    let a_ptr = self.bitcast(a, void_ptr_type);
+-                    let b_ptr = self.bitcast(b, void_ptr_type);
+-
+-                    // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+-                    let builtin = self.context.get_builtin_function("memcmp");
+-                    let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+-                    self.sext(cmp, self.type_ix(32))
+-                }
+-
+                 sym::black_box => {
+                     args[0].val.store(self, result);
+ 
+--- a/compiler/rustc_codegen_llvm/src/context.rs
++++ b/compiler/rustc_codegen_llvm/src/context.rs
+@@ -902,8 +902,7 @@
+         ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void);
+ 
+         // This isn't an "LLVM intrinsic", but LLVM's optimization passes
+-        // recognize it like one (including turning it into `bcmp` sometimes)
+-        // and we use it to implement intrinsics like `raw_eq` and `compare_bytes`
++        // recognize it like one and we assume it exists in `core::slice::cmp`
+         match self.sess().target.arch.as_ref() {
+             "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16),
+             _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32),
+--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
++++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
+@@ -329,16 +329,6 @@
+                 }
+             }
+ 
+-            sym::compare_bytes => {
+-                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+-                let cmp = self.call_intrinsic(
+-                    "memcmp",
+-                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
+-                );
+-                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
+-                self.sext(cmp, self.type_ix(32))
+-            }
+-
+             sym::black_box => {
+                 args[0].val.store(self, result);
+                 let result_val_span = [result.llval];
+--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
++++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+@@ -261,10 +261,6 @@
+             sym::write_bytes => {
+                 self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
+             }
+-            sym::compare_bytes => {
+-                let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
+-                self.write_scalar(result, dest)?;
+-            }
+             sym::arith_offset => {
+                 let ptr = self.read_pointer(&args[0])?;
+                 let offset_count = self.read_target_isize(&args[1])?;
+@@ -645,24 +641,6 @@
+ 
+         let bytes = std::iter::repeat(byte).take(len.bytes_usize());
+         self.write_bytes_ptr(dst, bytes)
+-    }
+-
+-    pub(crate) fn compare_bytes_intrinsic(
+-        &mut self,
+-        left: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+-        right: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+-        byte_count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+-    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+-        let left = self.read_pointer(left)?;
+-        let right = self.read_pointer(right)?;
+-        let n = Size::from_bytes(self.read_target_usize(byte_count)?);
+-
+-        let left_bytes = self.read_bytes_ptr_strip_provenance(left, n)?;
+-        let right_bytes = self.read_bytes_ptr_strip_provenance(right, n)?;
+-
+-        // `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
+-        let result = Ord::cmp(left_bytes, right_bytes) as i32;
+-        Ok(Scalar::from_i32(result))
+     }
+ 
+     pub(crate) fn raw_eq_intrinsic(
+--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
++++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+@@ -273,10 +273,6 @@
+                 ],
+                 Ty::new_unit(tcx),
+             ),
+-            sym::compare_bytes => {
+-                let byte_ptr = Ty::new_imm_ptr(tcx, tcx.types.u8);
+-                (0, vec![byte_ptr, byte_ptr, tcx.types.usize], tcx.types.i32)
+-            }
+             sym::write_bytes | sym::volatile_set_memory => (
+                 1,
+                 vec![
+--- a/compiler/rustc_span/src/symbol.rs
++++ b/compiler/rustc_span/src/symbol.rs
+@@ -501,7 +501,6 @@
+         cold,
+         collapse_debuginfo,
+         column,
+-        compare_bytes,
+         compare_exchange,
+         compare_exchange_weak,
+         compile_error,
+--- a/library/core/src/intrinsics.rs
++++ b/library/core/src/intrinsics.rs
+@@ -2385,25 +2385,6 @@
+     #[rustc_nounwind]
+     pub fn raw_eq<T>(a: &T, b: &T) -> bool;
+ 
+-    /// Lexicographically compare `[left, left + bytes)` and `[right, right + bytes)`
+-    /// as unsigned bytes, returning negative if `left` is less, zero if all the
+-    /// bytes match, or positive if `right` is greater.
+-    ///
+-    /// This underlies things like `<[u8]>::cmp`, and will usually lower to `memcmp`.
+-    ///
+-    /// # Safety
+-    ///
+-    /// `left` and `right` must each be [valid] for reads of `bytes` bytes.
+-    ///
+-    /// Note that this applies to the whole range, not just until the first byte
+-    /// that differs.  That allows optimizations that can read in large chunks.
+-    ///
+-    /// [valid]: crate::ptr#safety
+-    #[cfg(not(bootstrap))]
+-    #[rustc_const_unstable(feature = "const_intrinsic_compare_bytes", issue = "none")]
+-    #[rustc_nounwind]
+-    pub fn compare_bytes(left: *const u8, right: *const u8, bytes: usize) -> i32;
+-
+     /// See documentation of [`std::hint::black_box`] for details.
+     ///
+     /// [`std::hint::black_box`]: crate::hint::black_box
+@@ -2842,20 +2823,5 @@
+             [T](dst: *mut T) => is_aligned_and_not_null(dst)
+         );
+         write_bytes(dst, val, count)
+-    }
+-}
+-
+-/// Backfill for bootstrap
+-#[cfg(bootstrap)]
+-pub unsafe fn compare_bytes(left: *const u8, right: *const u8, bytes: usize) -> i32 {
+-    extern "C" {
+-        fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> crate::ffi::c_int;
+-    }
+-
+-    if bytes != 0 {
+-        // SAFETY: Since bytes is non-zero, the caller has met `memcmp`'s requirements.
+-        unsafe { memcmp(left, right, bytes).into() }
+-    } else {
+-        0
+     }
+ }
+--- a/library/core/src/slice/cmp.rs
++++ b/library/core/src/slice/cmp.rs
+@@ -1,12 +1,22 @@
+ //! Comparison traits for `[T]`.
+ 
+ use crate::cmp::{self, BytewiseEq, Ordering};
+-use crate::intrinsics::compare_bytes;
++use crate::ffi;
+ use crate::mem;
+ 
+ use super::from_raw_parts;
+ use super::memchr;
+ 
++extern "C" {
++    /// Calls implementation provided memcmp.
++    ///
++    /// Interprets the data as u8.
++    ///
++    /// Returns 0 for equal, < 0 for less than and > 0 for greater
++    /// than.
++    fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> ffi::c_int;
++}
++
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<A, B> PartialEq<[B]> for [A]
+ where
+@@ -64,8 +74,7 @@
+     }
+ }
+ 
+-// When each element can be compared byte-wise, we can compare all the bytes
+-// from the whole size in one call to the intrinsics.
++// Use memcmp for bytewise equality when the types allow
+ impl<A, B> SlicePartialEq<B> for [A]
+ where
+     A: BytewiseEq<B>,
+@@ -79,7 +88,7 @@
+         // The two slices have been checked to have the same size above.
+         unsafe {
+             let size = mem::size_of_val(self);
+-            compare_bytes(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
++            memcmp(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
+         }
+     }
+ }
+@@ -174,7 +183,7 @@
+     }
+ }
+ 
+-// `compare_bytes` compares a sequence of unsigned bytes lexicographically.
++// memcmp compares a sequence of unsigned bytes lexicographically.
+ // this matches the order we want for [u8], but no others (not even [i8]).
+ impl SliceOrd for u8 {
+     #[inline]
+@@ -186,7 +195,7 @@
+         // SAFETY: `left` and `right` are references and are thus guaranteed to be valid.
+         // We use the minimum of both lengths which guarantees that both regions are
+         // valid for reads in that interval.
+-        let mut order = unsafe { compare_bytes(left.as_ptr(), right.as_ptr(), len) as isize };
++        let mut order = unsafe { memcmp(left.as_ptr(), right.as_ptr(), len) as isize };
+         if order == 0 {
+             order = diff;
+         }