/******************************************************************************
 *
 * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 *
 ******************************************************************************/
#ifndef __OSDEP_LINUX_SERVICE_H_
#define __OSDEP_LINUX_SERVICE_H_

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,5))
#include <linux/kref.h>
#endif
//#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/circ_buf.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
#include <asm/semaphore.h>
#else
#include <linux/semaphore.h>
#endif
#include <linux/sem.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/delay.h>
#include <linux/interrupt.h>	// for struct tasklet_struct
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/vmalloc.h>

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,41))
#include <linux/tqueue.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#include <uapi/linux/limits.h>
#else
#include <linux/limits.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
#include <linux/sched/signal.h>
#endif

#ifdef RTK_DMP_PLATFORM
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,12))
#include <linux/pageremap.h>
#endif
#include <asm/io.h>
#endif

#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
#endif

/* Monitor mode */
#include <net/ieee80211_radiotap.h>

#ifdef CONFIG_IOCTL_CFG80211
/*	#include <linux/ieee80211.h> */
#include <net/cfg80211.h>
#endif //CONFIG_IOCTL_CFG80211

#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
#include <linux/in.h>
#include <linux/udp.h>
#endif

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif //CONFIG_HAS_EARLYSUSPEND

#ifdef CONFIG_EFUSE_CONFIG_FILE
#include <linux/fs.h>
#endif

#ifdef CONFIG_USB_HCI
#include <linux/usb.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
#include <linux/usb_ch9.h>
#else
#include <linux/usb/ch9.h>
#endif
#endif

#ifdef CONFIG_BT_COEXIST_SOCKET_TRX
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/netlink.h>
#endif //CONFIG_BT_COEXIST_SOCKET_TRX

#ifdef CONFIG_USB_HCI
typedef struct urb * PURB;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
#ifdef CONFIG_USB_SUSPEND
#define CONFIG_AUTOSUSPEND	1
#endif
#endif
#endif

typedef struct semaphore	_sema;
typedef spinlock_t		_lock;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
typedef struct mutex		_mutex;
#else
typedef struct semaphore	_mutex;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
/* On 4.15+ kernels struct timer_list no longer carries a "data" cookie and
 * its callback takes a struct timer_list pointer, so the legacy-style
 * callback and context are kept next to the kernel timer in this wrapper. */
typedef struct legacy_timer_emu {
	struct timer_list t;
	void (*function)(unsigned long);
	unsigned long data;
} _timer;
#else
typedef struct timer_list	_timer;
#endif

struct	__queue	{
	struct list_head	queue;
	_lock			lock;
};

typedef struct sk_buff		_pkt;
typedef unsigned char		_buffer;

typedef struct __queue		_queue;
typedef struct list_head	_list;
typedef int			_OS_STATUS;
//typedef u32			_irqL;
typedef unsigned long		_irqL;
typedef struct net_device	*_nic_hdl;

typedef void			*_thread_hdl_;
typedef int			thread_return;
typedef void			*thread_context;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0))
#define thread_exit() kthread_complete_and_exit(NULL, 0)
#else
#define thread_exit() complete_and_exit(NULL, 0)
#endif

typedef void	timer_hdl_return;
typedef void	*timer_hdl_context;
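/*
 * Usage sketch (illustrative only, not part of the driver API): a _queue
 * pairs a list head with its _lock and is normally walked under one of the
 * _enter_critical*() helpers defined further below. The queue name
 * pexample_queue is hypothetical.
 *
 *	_irqL irqL;
 *	_list *plist;
 *
 *	_enter_critical_bh(&pexample_queue->lock, &irqL);
 *	plist = get_next(get_list_head(pexample_queue));
 *	// ... walk entries, unlink with rtw_list_delete(plist) ...
 *	_exit_critical_bh(&pexample_queue->lock, &irqL);
 */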
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41))
typedef struct work_struct _workitem;
#else
typedef struct tq_struct _workitem;
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
// Ported from the Linux kernel for compatibility with older kernels.
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

__inline static _list *get_next(_list *list)
{
	return list->next;
}

__inline static _list *get_list_head(_queue *queue)
{
	return (&(queue->queue));
}

#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))

__inline static void _enter_critical(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

__inline static void _exit_critical(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

__inline static void _enter_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

__inline static void _exit_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

__inline static void _enter_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_lock_bh(plock);
}

__inline static void _exit_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_unlock_bh(plock);
}

__inline static int _enter_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
	int ret = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	//mutex_lock(pmutex);
	ret = mutex_lock_interruptible(pmutex);
#else
	ret = down_interruptible(pmutex);
#endif
	return ret;
}

__inline static void _exit_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	mutex_unlock(pmutex);
#else
	up(pmutex);
#endif
}

__inline static void rtw_list_delete(_list *plist)
{
	list_del_init(plist);
}

#define RTW_TIMER_HDL_ARGS void *FunctionContext

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
static void legacy_timer_emu_func(struct timer_list *t)
{
	_timer *lt = from_timer(lt, t, t);

	lt->function(lt->data);
}
#endif

__inline static void _init_timer(_timer *ptimer, _nic_hdl nic_hdl, void *pfunc, void *cntx)
{
	//setup_timer(ptimer, pfunc,(u32)cntx);
	ptimer->function = pfunc;
	ptimer->data = (unsigned long)cntx;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
	timer_setup(&ptimer->t, legacy_timer_emu_func, 0);
#else
	init_timer(ptimer);
#endif
}

__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
	mod_timer(&ptimer->t, (jiffies + (delay_time * HZ / 1000)));
#else
	mod_timer(ptimer, (jiffies + (delay_time * HZ / 1000)));
#endif
}

__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
	del_timer_sync(&ptimer->t);
#else
	del_timer_sync(ptimer);
#endif
	*bcancelled = _TRUE;	//TRUE ==1; FALSE==0
}
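/*
 * Usage sketch (illustrative only): the timer helpers above hide the 4.15+
 * timer_setup() emulation behind the legacy function/data callback style,
 * with delays given in milliseconds. The names rtw_example_timeout_hdl,
 * padapter and pexample_ctx are hypothetical.
 *
 *	void rtw_example_timeout_hdl(RTW_TIMER_HDL_ARGS);	// void *FunctionContext
 *
 *	_timer example_timer;
 *	u8 bcancelled;
 *
 *	_init_timer(&example_timer, padapter->pnetdev, rtw_example_timeout_hdl, pexample_ctx);
 *	_set_timer(&example_timer, 100);		// fire after ~100 ms
 *	_cancel_timer(&example_timer, &bcancelled);	// synchronous cancel
 */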
__inline static void _init_workitem(_workitem *pwork, void *pfunc, PVOID cntx)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
	INIT_WORK(pwork, pfunc);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41))
	INIT_WORK(pwork, pfunc, pwork);
#else
	INIT_TQUEUE(pwork, pfunc, pwork);
#endif
}

__inline static void _set_workitem(_workitem *pwork)
{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41))
	schedule_work(pwork);
#else
	schedule_task(pwork);
#endif
}

__inline static void _cancel_workitem_sync(_workitem *pwork)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
	cancel_work_sync(pwork);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41))
	flush_scheduled_work();
#else
	flush_scheduled_tasks();
#endif
}

//
// Global Mutex: can only be used at PASSIVE level.
//

#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter)                                  \
{                                                                            \
	while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1)         \
	{                                                                    \
		atomic_dec((atomic_t *)&(_MutexCounter));                     \
		msleep(10);                                                   \
	}                                                                    \
}

#define RELEASE_GLOBAL_MUTEX(_MutexCounter)                                  \
{                                                                            \
	atomic_dec((atomic_t *)&(_MutexCounter));                             \
}

static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)));
#else
	return netif_queue_stopped(pnetdev);
#endif
}

static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	netif_tx_wake_all_queues(pnetdev);
#else
	netif_wake_queue(pnetdev);
#endif
}

static inline void rtw_netif_start_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	netif_tx_start_all_queues(pnetdev);
#else
	netif_start_queue(pnetdev);
#endif
}

static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	netif_tx_stop_all_queues(pnetdev);
#else
	netif_stop_queue(pnetdev);
#endif
}

static inline void rtw_merge_string(char *dst, int dst_len, const char *src1, const char *src2)
{
	int len = 0;

	len += snprintf(dst + len, dst_len - len, "%s", src1);
	len += snprintf(dst + len, dst_len - len, "%s", src2);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
#else //(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
#define rtw_signal_process(pid, sig) kill_proc((pid), (sig), 1)
#endif //(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))

// Suspend lock prevents the system from entering suspend
#ifdef CONFIG_WAKELOCK
#include <linux/wakelock.h>
#elif defined(CONFIG_ANDROID_POWER)
#include <linux/android_power.h>
#endif

// limitation of path length
#define PATH_LENGTH_MAX PATH_MAX

// Atomic integer operations
#define ATOMIC_T atomic_t

#define rtw_netdev_priv(netdev) ( ((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv )

#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ndev->name
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) adapter->pnetdev->name
#define FUNC_NDEV_FMT "%s(%s)"
#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name

struct rtw_netdev_priv_indicator {
	void *priv;
	u32 sizeof_priv;
};
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
extern struct net_device *rtw_alloc_etherdev(int sizeof_priv);

#define STRUCT_PACKED __attribute__ ((packed))

#endif