स्विफ्ट के गूढ़ पैरामीटर bitPattern: और truncatingBitPattern: से मैं इतना निराश हो गया था — मुझे याद ही नहीं रहता था कि कब किसका उपयोग करना है — इसलिए मैंने निम्न क्लास बनाई जिसमें बड़ी संख्या में रूपांतरण विधियाँ हैं।
मैं यह सुझाव नहीं दे रहा कि आप इसे अपने प्रोग्राम में शामिल करें। मुझे यकीन है कि बहुत से लोग कहेंगे कि स्विफ्ट हमें खुद से बचाने की कोशिश कर रहा है और यह प्रयास व्यर्थ है। तो शायद आपको इस फ़ाइल को कहीं चीट शीट की तरह रखना चाहिए, ताकि आप जल्दी से तय कर सकें कि कोई रूपांतरण कैसे करना है, और आवश्यकता पड़ने पर सही पैरामीटर अपने प्रोग्राम में कॉपी कर सकें।
संयोग से, JDI का अर्थ है "जस्ट डू इट" (बस कर दो)।
/// Class containing a large number of static methods to convert an Int to a UInt or vice-versa, and
/// also to perform conversions between different bit sizes, for example UInt32 to UInt8.
///
/// Many of these "conversions" are trivial, and are only included for the sake of completeness.
///
/// A few of the conversions involving Int and UInt can give different results when run on 32-bit
/// and 64-bit systems. All of the conversion where the bit size of both the source and the target
/// are specified will always give the same result independent of platform.
public class JDI {
    // NOTE: Swift 4 (SE-0104) removed `init(truncatingBitPattern:)`; its exact
    // replacement is `init(truncatingIfNeeded:)`, used throughout below. The
    // `init(bitPattern:)` initializers (same-width reinterpretation) are unchanged.
    // Method names keep their original UpperCamelCase spelling so existing
    // callers continue to compile.

    // MARK: - To signed Int

    // To Int8 — narrowing keeps the low 8 bits; same-width unsigned input is
    // reinterpreted bit-for-bit.

    /// Identity conversion.
    public static func ToInt8(_ x: Int8) -> Int8 {
        return x
    }
    /// Keeps the low 8 bits of `x`, reinterpreted as signed.
    public static func ToInt8(_ x: Int32) -> Int8 {
        return Int8(truncatingIfNeeded: x)
    }
    /// Keeps the low 8 bits of `x`, reinterpreted as signed.
    public static func ToInt8(_ x: Int64) -> Int8 {
        return Int8(truncatingIfNeeded: x)
    }
    /// Keeps the low 8 bits of `x`, reinterpreted as signed.
    public static func ToInt8(_ x: Int) -> Int8 {
        return Int8(truncatingIfNeeded: x)
    }
    /// Reinterprets the 8-bit pattern of `x` as signed (0xf3 -> -13).
    public static func ToInt8(_ x: UInt8) -> Int8 {
        return Int8(bitPattern: x)
    }
    /// Keeps the low 8 bits of `x`, reinterpreted as signed.
    public static func ToInt8(_ x: UInt32) -> Int8 {
        return Int8(truncatingIfNeeded: x)
    }
    /// Keeps the low 8 bits of `x`, reinterpreted as signed.
    public static func ToInt8(_ x: UInt64) -> Int8 {
        return Int8(truncatingIfNeeded: x)
    }
    /// Keeps the low 8 bits of `x`, reinterpreted as signed.
    public static func ToInt8(_ x: UInt) -> Int8 {
        return Int8(truncatingIfNeeded: x)
    }

    // To Int32

    /// Value-preserving widening (sign-extends).
    public static func ToInt32(_ x: Int8) -> Int32 {
        return Int32(x)
    }
    /// Identity conversion.
    public static func ToInt32(_ x: Int32) -> Int32 {
        return x
    }
    /// Keeps the low 32 bits of `x`, reinterpreted as signed.
    public static func ToInt32(_ x: Int64) -> Int32 {
        return Int32(truncatingIfNeeded: x)
    }
    /// Keeps the low 32 bits of `x`, reinterpreted as signed (identity on
    /// 32-bit platforms).
    public static func ToInt32(_ x: Int) -> Int32 {
        return Int32(truncatingIfNeeded: x)
    }
    /// Value-preserving widening (zero-extends).
    public static func ToInt32(_ x: UInt8) -> Int32 {
        return Int32(x)
    }
    /// Reinterprets the 32-bit pattern of `x` as signed.
    public static func ToInt32(_ x: UInt32) -> Int32 {
        return Int32(bitPattern: x)
    }
    /// Keeps the low 32 bits of `x`, reinterpreted as signed.
    public static func ToInt32(_ x: UInt64) -> Int32 {
        return Int32(truncatingIfNeeded: x)
    }
    /// Keeps the low 32 bits of `x`, reinterpreted as signed.
    public static func ToInt32(_ x: UInt) -> Int32 {
        return Int32(truncatingIfNeeded: x)
    }

    // To Int64

    /// Value-preserving widening (sign-extends).
    public static func ToInt64(_ x: Int8) -> Int64 {
        return Int64(x)
    }
    /// Value-preserving widening (sign-extends).
    public static func ToInt64(_ x: Int32) -> Int64 {
        return Int64(x)
    }
    /// Identity conversion.
    public static func ToInt64(_ x: Int64) -> Int64 {
        return x
    }
    /// Value-preserving widening (identity on 64-bit platforms).
    public static func ToInt64(_ x: Int) -> Int64 {
        return Int64(x)
    }
    /// Value-preserving widening (zero-extends).
    public static func ToInt64(_ x: UInt8) -> Int64 {
        return Int64(x)
    }
    /// Value-preserving widening (zero-extends).
    public static func ToInt64(_ x: UInt32) -> Int64 {
        return Int64(x)
    }
    /// Reinterprets the 64-bit pattern of `x` as signed.
    public static func ToInt64(_ x: UInt64) -> Int64 {
        return Int64(bitPattern: x)
    }
    public static func ToInt64(_ x: UInt) -> Int64 {
        return Int64(bitPattern: UInt64(x)) // Does not extend high bit of 32-bit input
    }

    // To Int — results can differ between 32-bit and 64-bit platforms because
    // the width of Int itself differs.

    /// Value-preserving widening (sign-extends).
    public static func ToInt(_ x: Int8) -> Int {
        return Int(x)
    }
    /// Value-preserving widening (sign-extends).
    public static func ToInt(_ x: Int32) -> Int {
        return Int(x)
    }
    /// Keeps the low word of `x` (identity on 64-bit platforms).
    public static func ToInt(_ x: Int64) -> Int {
        return Int(truncatingIfNeeded: x)
    }
    /// Identity conversion.
    public static func ToInt(_ x: Int) -> Int {
        return x
    }
    /// Value-preserving widening (zero-extends).
    public static func ToInt(_ x: UInt8) -> Int {
        return Int(x)
    }
    public static func ToInt(_ x: UInt32) -> Int {
        if MemoryLayout<Int>.size == MemoryLayout<Int32>.size {
            // 32-bit platforms: reinterpret the bit pattern; a plain Int(x)
            // would trap for values above Int32.max.
            return Int(Int32(bitPattern: x)) // For 32-bit systems, non-authorized interpretation
        }
        return Int(x) // 64-bit platforms: value-preserving widening
    }
    /// Keeps the low word of `x`, reinterpreted as signed.
    public static func ToInt(_ x: UInt64) -> Int {
        return Int(truncatingIfNeeded: x)
    }
    /// Reinterprets the word-sized pattern of `x` as signed.
    public static func ToInt(_ x: UInt) -> Int {
        return Int(bitPattern: x)
    }

    // MARK: - To unsigned Int

    // To UInt8

    /// Reinterprets the 8-bit pattern of `x` as unsigned (-13 -> 0xf3).
    public static func ToUInt8(_ x: Int8) -> UInt8 {
        return UInt8(bitPattern: x)
    }
    /// Keeps the low 8 bits of `x`.
    public static func ToUInt8(_ x: Int32) -> UInt8 {
        return UInt8(truncatingIfNeeded: x)
    }
    /// Keeps the low 8 bits of `x`.
    public static func ToUInt8(_ x: Int64) -> UInt8 {
        return UInt8(truncatingIfNeeded: x)
    }
    /// Keeps the low 8 bits of `x`.
    public static func ToUInt8(_ x: Int) -> UInt8 {
        return UInt8(truncatingIfNeeded: x)
    }
    /// Identity conversion.
    public static func ToUInt8(_ x: UInt8) -> UInt8 {
        return x
    }
    /// Keeps the low 8 bits of `x`.
    public static func ToUInt8(_ x: UInt32) -> UInt8 {
        return UInt8(truncatingIfNeeded: x)
    }
    /// Keeps the low 8 bits of `x`.
    public static func ToUInt8(_ x: UInt64) -> UInt8 {
        return UInt8(truncatingIfNeeded: x)
    }
    /// Keeps the low 8 bits of `x`.
    public static func ToUInt8(_ x: UInt) -> UInt8 {
        return UInt8(truncatingIfNeeded: x)
    }

    // To UInt32

    public static func ToUInt32(_ x: Int8) -> UInt32 {
        return UInt32(bitPattern: Int32(x)) // Extend sign bit, assume minus input significant
    }
    /// Reinterprets the 32-bit pattern of `x` as unsigned.
    public static func ToUInt32(_ x: Int32) -> UInt32 {
        return UInt32(bitPattern: x)
    }
    /// Keeps the low 32 bits of `x`.
    public static func ToUInt32(_ x: Int64) -> UInt32 {
        return UInt32(truncatingIfNeeded: x)
    }
    /// Keeps the low 32 bits of `x` (sign pattern preserved on 32-bit platforms).
    public static func ToUInt32(_ x: Int) -> UInt32 {
        return UInt32(truncatingIfNeeded: x)
    }
    /// Value-preserving widening (zero-extends).
    public static func ToUInt32(_ x: UInt8) -> UInt32 {
        return UInt32(x)
    }
    /// Identity conversion.
    public static func ToUInt32(_ x: UInt32) -> UInt32 {
        return x
    }
    /// Keeps the low 32 bits of `x`.
    public static func ToUInt32(_ x: UInt64) -> UInt32 {
        return UInt32(truncatingIfNeeded: x)
    }
    /// Keeps the low 32 bits of `x` (identity on 32-bit platforms).
    public static func ToUInt32(_ x: UInt) -> UInt32 {
        return UInt32(truncatingIfNeeded: x)
    }

    // To UInt64

    public static func ToUInt64(_ x: Int8) -> UInt64 {
        return UInt64(bitPattern: Int64(x)) // Extend sign bit, assume minus input significant
    }
    public static func ToUInt64(_ x: Int32) -> UInt64 {
        return UInt64(bitPattern: Int64(x)) // Extend sign bit, assume minus input significant
    }
    /// Reinterprets the 64-bit pattern of `x` as unsigned.
    public static func ToUInt64(_ x: Int64) -> UInt64 {
        return UInt64(bitPattern: x)
    }
    public static func ToUInt64(_ x: Int) -> UInt64 {
        return UInt64(bitPattern: Int64(x)) // Extend sign bit if necessary, assume minus input significant
    }
    /// Value-preserving widening (zero-extends).
    public static func ToUInt64(_ x: UInt8) -> UInt64 {
        return UInt64(x)
    }
    /// Value-preserving widening (zero-extends).
    public static func ToUInt64(_ x: UInt32) -> UInt64 {
        return UInt64(x)
    }
    /// Identity conversion.
    public static func ToUInt64(_ x: UInt64) -> UInt64 {
        return x
    }
    public static func ToUInt64(_ x: UInt) -> UInt64 {
        return UInt64(x) // Does not extend high bit of 32-bit input
    }

    // To UInt — results can differ between 32-bit and 64-bit platforms because
    // the width of UInt itself differs.

    public static func ToUInt(_ x: Int8) -> UInt {
        return UInt(bitPattern: Int(x)) // Extend sign bit, assume minus input significant
    }
    public static func ToUInt(_ x: Int32) -> UInt {
        return UInt(truncatingIfNeeded: Int64(x)) // Extend sign bit, assume minus input significant
    }
    /// Keeps the low word of `x` (full pattern on 64-bit platforms).
    public static func ToUInt(_ x: Int64) -> UInt {
        return UInt(truncatingIfNeeded: x)
    }
    /// Reinterprets the word-sized pattern of `x` as unsigned.
    public static func ToUInt(_ x: Int) -> UInt {
        return UInt(bitPattern: x)
    }
    /// Value-preserving widening (zero-extends).
    public static func ToUInt(_ x: UInt8) -> UInt {
        return UInt(x)
    }
    /// Value-preserving widening (identity on 32-bit platforms).
    public static func ToUInt(_ x: UInt32) -> UInt {
        return UInt(x)
    }
    /// Keeps the low word of `x` (identity on 64-bit platforms).
    public static func ToUInt(_ x: UInt64) -> UInt {
        return UInt(truncatingIfNeeded: x)
    }
    /// Identity conversion.
    public static func ToUInt(_ x: UInt) -> UInt {
        return x
    }
}
यहाँ कुछ परीक्षण कोड है:
/// Smoke-tests every `JDI` conversion overload against exact expected values.
///
/// Where the expectation depends on the platform word size (conversions
/// involving `Int`/`UInt`), the expected literal is selected at compile time
/// with `#if arch(...)`, because some of the 64-bit expected literals
/// (e.g. `0xfffffffffffffff3`) would not even compile as a 32-bit `UInt`.
/// Failures trap via `assert` (debug builds only); on success a completion
/// message is printed.
public func doTest() {
// To Int8
assert(JDI.ToInt8(42 as Int8) == 42)
assert(JDI.ToInt8(-13 as Int8) == -13)
assert(JDI.ToInt8(42 as Int32) == 42)
// 257 == 0x101, so the low 8 bits give 1.
assert(JDI.ToInt8(257 as Int32) == 1)
assert(JDI.ToInt8(42 as Int64) == 42)
assert(JDI.ToInt8(257 as Int64) == 1)
assert(JDI.ToInt8(42 as Int) == 42)
assert(JDI.ToInt8(257 as Int) == 1)
assert(JDI.ToInt8(42 as UInt8) == 42)
// 0xf3 reinterpreted as a signed byte is -13.
assert(JDI.ToInt8(0xf3 as UInt8) == -13)
assert(JDI.ToInt8(42 as UInt32) == 42)
assert(JDI.ToInt8(0xfffffff3 as UInt32) == -13)
assert(JDI.ToInt8(42 as UInt64) == 42)
assert(JDI.ToInt8(UInt64.max - 12) == -13)
assert(JDI.ToInt8(42 as UInt) == 42)
assert(JDI.ToInt8(UInt.max - 12) == -13)
// To Int32
assert(JDI.ToInt32(42 as Int8) == 42)
assert(JDI.ToInt32(-13 as Int8) == -13)
assert(JDI.ToInt32(42 as Int32) == 42)
assert(JDI.ToInt32(-13 as Int32) == -13)
assert(JDI.ToInt32(42 as Int64) == 42)
// Truncating one below Int32.min wraps around to Int32.max.
assert(JDI.ToInt32(Int64(Int32.min) - 1) == Int32.max)
assert(JDI.ToInt32(42 as Int) == 42)
assert(JDI.ToInt32(-13 as Int) == -13)
assert(JDI.ToInt32(42 as UInt8) == 42)
assert(JDI.ToInt32(0xf3 as UInt8) == 243)
assert(JDI.ToInt32(42 as UInt32) == 42)
assert(JDI.ToInt32(0xfffffff3 as UInt32) == -13)
assert(JDI.ToInt32(42 as UInt64) == 42)
assert(JDI.ToInt32(UInt64.max - 12) == -13)
assert(JDI.ToInt32(42 as UInt) == 42)
assert(JDI.ToInt32(UInt.max - 12) == -13)
// To Int64
assert(JDI.ToInt64(42 as Int8) == 42)
assert(JDI.ToInt64(-13 as Int8) == -13)
assert(JDI.ToInt64(42 as Int32) == 42)
assert(JDI.ToInt64(-13 as Int32) == -13)
assert(JDI.ToInt64(42 as Int64) == 42)
assert(JDI.ToInt64(-13 as Int64) == -13)
assert(JDI.ToInt64(42 as Int) == 42)
assert(JDI.ToInt64(-13 as Int) == -13)
assert(JDI.ToInt64(42 as UInt8) == 42)
assert(JDI.ToInt64(0xf3 as UInt8) == 243)
assert(JDI.ToInt64(42 as UInt32) == 42)
// UInt32 is zero-extended into Int64, so the value stays positive.
assert(JDI.ToInt64(0xfffffff3 as UInt32) == 4294967283)
assert(JDI.ToInt64(42 as UInt64) == 42)
assert(JDI.ToInt64(UInt64.max - 12) == -13)
assert(JDI.ToInt64(42 as UInt) == 42)
// ToInt64(UInt) zero-extends, so a 32-bit UInt.max-12 stays positive while
// the 64-bit pattern reinterprets to -13.
#if (arch(i386) || arch(arm))
assert(JDI.ToInt64(UInt.max - 12) == 4294967283) // For 32-bit systems
#else
assert(JDI.ToInt64(UInt.max - 12) == -13) // For 64-bit systems
#endif
// To Int
assert(JDI.ToInt(42 as Int8) == 42)
assert(JDI.ToInt(-13 as Int8) == -13)
assert(JDI.ToInt(42 as Int32) == 42)
assert(JDI.ToInt(-13 as Int32) == -13)
assert(JDI.ToInt(42 as Int64) == 42)
assert(JDI.ToInt(-13 as Int64) == -13)
assert(JDI.ToInt(42 as Int) == 42)
assert(JDI.ToInt(-13 as Int) == -13)
assert(JDI.ToInt(42 as UInt8) == 42)
assert(JDI.ToInt(0xf3 as UInt8) == 243)
assert(JDI.ToInt(42 as UInt32) == 42)
// On 32-bit the pattern is reinterpreted (negative); on 64-bit the value is
// widened (positive) — see JDI.ToInt(UInt32)'s MemoryLayout branch.
#if (arch(i386) || arch(arm))
assert(JDI.ToInt(0xfffffff3 as UInt32) == -13) // For 32-bit systems
#else
assert(JDI.ToInt(0xfffffff3 as UInt32) == 4294967283) // For 64-bit systems
#endif
assert(JDI.ToInt(42 as UInt64) == 42)
assert(JDI.ToInt(UInt64.max - 12) == -13)
assert(JDI.ToInt(42 as UInt) == 42)
assert(JDI.ToInt(UInt.max - 12) == -13)
// To UInt8
assert(JDI.ToUInt8(42 as Int8) == 42)
assert(JDI.ToUInt8(-13 as Int8) == 0xf3)
assert(JDI.ToUInt8(42 as Int32) == 42)
assert(JDI.ToUInt8(-13 as Int32) == 0xf3)
assert(JDI.ToUInt8(42 as Int64) == 42)
assert(JDI.ToUInt8(-13 as Int64) == 0xf3)
assert(JDI.ToUInt8(Int64.max - 12) == 0xf3)
assert(JDI.ToUInt8(42 as Int) == 42)
assert(JDI.ToUInt8(-13 as Int) == 0xf3)
assert(JDI.ToUInt8(Int.max - 12) == 0xf3)
assert(JDI.ToUInt8(42 as UInt8) == 42)
assert(JDI.ToUInt8(0xf3 as UInt8) == 0xf3)
assert(JDI.ToUInt8(42 as UInt32) == 42)
assert(JDI.ToUInt8(0xfffffff3 as UInt32) == 0xf3)
assert(JDI.ToUInt8(42 as UInt64) == 42)
assert(JDI.ToUInt8(UInt64.max - 12) == 0xf3)
assert(JDI.ToUInt8(42 as UInt) == 42)
assert(JDI.ToUInt8(UInt.max - 12) == 0xf3)
// To UInt32
assert(JDI.ToUInt32(42 as Int8) == 42)
assert(JDI.ToUInt32(-13 as Int8) == 0xfffffff3)
assert(JDI.ToUInt32(42 as Int32) == 42)
assert(JDI.ToUInt32(-13 as Int32) == 0xfffffff3)
assert(JDI.ToUInt32(42 as Int64) == 42)
assert(JDI.ToUInt32(-13 as Int64) == 0xfffffff3)
assert(JDI.ToUInt32(Int64.max - 12) == 0xfffffff3)
assert(JDI.ToUInt32(42 as Int) == 42)
assert(JDI.ToUInt32(-13 as Int) == 0xfffffff3)
// Int.max differs per platform, so the truncated low 32 bits differ too.
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt32(Int.max - 12) == 0x7ffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt32(Int.max - 12) == 0xfffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt32(42 as UInt8) == 42)
assert(JDI.ToUInt32(0xf3 as UInt8) == 0xf3)
assert(JDI.ToUInt32(42 as UInt32) == 42)
assert(JDI.ToUInt32(0xfffffff3 as UInt32) == 0xfffffff3)
assert(JDI.ToUInt32(42 as UInt64) == 42)
assert(JDI.ToUInt32(UInt64.max - 12) == 0xfffffff3)
assert(JDI.ToUInt32(42 as UInt) == 42)
assert(JDI.ToUInt32(UInt.max - 12) == 0xfffffff3)
// To UInt64
assert(JDI.ToUInt64(42 as Int8) == 42)
assert(JDI.ToUInt64(-13 as Int8) == 0xfffffffffffffff3)
assert(JDI.ToUInt64(42 as Int32) == 42)
assert(JDI.ToUInt64(-13 as Int32) == 0xfffffffffffffff3)
assert(JDI.ToUInt64(42 as Int64) == 42)
assert(JDI.ToUInt64(-13 as Int64) == 0xfffffffffffffff3)
assert(JDI.ToUInt64(Int64.max - 12) == (UInt64.max >> 1) - 12)
assert(JDI.ToUInt64(42 as Int) == 42)
assert(JDI.ToUInt64(-13 as Int) == 0xfffffffffffffff3)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt64(Int.max - 12) == 0x7ffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt64(Int.max - 12) == 0x7ffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt64(42 as UInt8) == 42)
assert(JDI.ToUInt64(0xf3 as UInt8) == 0xf3)
assert(JDI.ToUInt64(42 as UInt32) == 42)
assert(JDI.ToUInt64(0xfffffff3 as UInt32) == 0xfffffff3)
assert(JDI.ToUInt64(42 as UInt64) == 42)
assert(JDI.ToUInt64(UInt64.max - 12) == 0xfffffffffffffff3)
assert(JDI.ToUInt64(42 as UInt) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt64(UInt.max - 12) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt64(UInt.max - 12) == 0xfffffffffffffff3) // For 64-bit systems
#endif
// To UInt — expected values depend on UInt's width, hence the arch branches.
assert(JDI.ToUInt(42 as Int8) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(-13 as Int8) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(-13 as Int8) == 0xfffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt(42 as Int32) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(-13 as Int32) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(-13 as Int32) == 0xfffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt(42 as Int64) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(-13 as Int64) == 0xfffffff3) // For 32-bit systems
assert(JDI.ToUInt(Int64.max - 12) == 0xfffffff3)
#else
assert(JDI.ToUInt(-13 as Int64) == 0xfffffffffffffff3) // For 64-bit systems
assert(JDI.ToUInt(Int64.max - 12) == 0x7ffffffffffffff3)
#endif
assert(JDI.ToUInt(42 as Int) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(Int.max - 12) == 0x7ffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(Int.max - 12) == 0x7ffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt(42 as UInt8) == 42)
assert(JDI.ToUInt(0xf3 as UInt8) == 0xf3)
assert(JDI.ToUInt(42 as UInt32) == 42)
assert(JDI.ToUInt(0xfffffff3 as UInt32) == 0xfffffff3)
assert(JDI.ToUInt(42 as UInt64) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(UInt64.max - 12) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(UInt64.max - 12) == 0xfffffffffffffff3) // For 64-bit systems
#endif
assert(JDI.ToUInt(42 as UInt) == 42)
#if (arch(i386) || arch(arm))
assert(JDI.ToUInt(UInt.max - 12) == 0xfffffff3) // For 32-bit systems
#else
assert(JDI.ToUInt(UInt.max - 12) == 0xfffffffffffffff3) // For 64-bit systems
#endif
print("\nTesting JDI complete.\n")
}
यदि ये आपके अपने तरीके हैं, तो आपको 'UInt' के बजाय बस 'Int' का उपयोग करना चाहिए। भले ही मान कभी ऋणात्मक न हों, स्विफ्ट दस्तावेज़ीकरण की सिफारिश यही है कि जब तक अन्यथा करने का कोई विशिष्ट कारण न हो (उदाहरण के लिए सी फ़ंक्शनों के साथ अंतरसंचालनीयता), लगभग हमेशा 'Int' का ही उपयोग करें। –
ये तरीके MWPhotoBrowser (Objective-C) से हैं, जिसका उपयोग करने के लिए मुझे ब्रिजिंग हेडर का इस्तेमाल करना पड़ता है, लेकिन यह इस तरह थोड़ा विचित्र है। मेरे पास UInt का उपयोग करने, या लाइब्रेरी को फोर्क करके उसे संपादित करने (जो बनाए रखना थोड़ा कठिन है) के अलावा कोई विकल्प नहीं है। – Akshat